Diffstat (limited to 'drivers/pci/controller')
-rw-r--r--  drivers/pci/controller/Kconfig | 445
-rw-r--r--  drivers/pci/controller/Makefile | 4
-rw-r--r--  drivers/pci/controller/cadence/Kconfig | 53
-rw-r--r--  drivers/pci/controller/cadence/Makefile | 12
-rw-r--r--  drivers/pci/controller/cadence/pci-j721e.c | 308
-rw-r--r--  drivers/pci/controller/cadence/pci-sky1.c | 238
-rw-r--r--  drivers/pci/controller/cadence/pcie-cadence-ep.c | 134
-rw-r--r--  drivers/pci/controller/cadence/pcie-cadence-host-common.c | 288
-rw-r--r--  drivers/pci/controller/cadence/pcie-cadence-host-common.h | 46
-rw-r--r--  drivers/pci/controller/cadence/pcie-cadence-host-hpa.c | 368
-rw-r--r--  drivers/pci/controller/cadence/pcie-cadence-host.c | 361
-rw-r--r--  drivers/pci/controller/cadence/pcie-cadence-hpa-regs.h | 193
-rw-r--r--  drivers/pci/controller/cadence/pcie-cadence-hpa.c | 167
-rw-r--r--  drivers/pci/controller/cadence/pcie-cadence-lga-regs.h | 230
-rw-r--r--  drivers/pci/controller/cadence/pcie-cadence-plat.c | 17
-rw-r--r--  drivers/pci/controller/cadence/pcie-cadence.c | 47
-rw-r--r--  drivers/pci/controller/cadence/pcie-cadence.h | 512
-rw-r--r--  drivers/pci/controller/cadence/pcie-sg2042.c | 131
-rw-r--r--  drivers/pci/controller/dwc/Kconfig | 563
-rw-r--r--  drivers/pci/controller/dwc/Makefile | 14
-rw-r--r--  drivers/pci/controller/dwc/pci-dra7xx.c | 80
-rw-r--r--  drivers/pci/controller/dwc/pci-exynos.c | 131
-rw-r--r--  drivers/pci/controller/dwc/pci-imx6.c | 1960
-rw-r--r--  drivers/pci/controller/dwc/pci-keystone.c | 452
-rw-r--r--  drivers/pci/controller/dwc/pci-layerscape-ep.c | 154
-rw-r--r--  drivers/pci/controller/dwc/pci-layerscape.c | 291
-rw-r--r--  drivers/pci/controller/dwc/pci-meson.c | 40
-rw-r--r--  drivers/pci/controller/dwc/pcie-al.c | 19
-rw-r--r--  drivers/pci/controller/dwc/pcie-amd-mdb.c | 526
-rw-r--r--  drivers/pci/controller/dwc/pcie-armada8k.c | 8
-rw-r--r--  drivers/pci/controller/dwc/pcie-artpec6.c | 48
-rw-r--r--  drivers/pci/controller/dwc/pcie-bt1.c | 12
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware-debugfs.c | 927
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware-ep.c | 876
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware-host.c | 714
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware-plat.c | 27
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware.c | 666
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware.h | 450
-rw-r--r--  drivers/pci/controller/dwc/pcie-dw-rockchip.c | 565
-rw-r--r--  drivers/pci/controller/dwc/pcie-fu740.c | 3
-rw-r--r--  drivers/pci/controller/dwc/pcie-hisi.c | 1
-rw-r--r--  drivers/pci/controller/dwc/pcie-histb.c | 30
-rw-r--r--  drivers/pci/controller/dwc/pcie-intel-gw.c | 20
-rw-r--r--  drivers/pci/controller/dwc/pcie-keembay.c | 55
-rw-r--r--  drivers/pci/controller/dwc/pcie-kirin.c | 205
-rw-r--r--  drivers/pci/controller/dwc/pcie-nxp-s32g.c | 406
-rw-r--r--  drivers/pci/controller/dwc/pcie-qcom-common.c | 88
-rw-r--r--  drivers/pci/controller/dwc/pcie-qcom-common.h | 14
-rw-r--r--  drivers/pci/controller/dwc/pcie-qcom-ep.c | 225
-rw-r--r--  drivers/pci/controller/dwc/pcie-qcom.c | 2075
-rw-r--r--  drivers/pci/controller/dwc/pcie-rcar-gen4.c | 804
-rw-r--r--  drivers/pci/controller/dwc/pcie-sophgo.c | 257
-rw-r--r--  drivers/pci/controller/dwc/pcie-spacemit-k1.c | 357
-rw-r--r--  drivers/pci/controller/dwc/pcie-spear13xx.c | 11
-rw-r--r--  drivers/pci/controller/dwc/pcie-stm32-ep.c | 343
-rw-r--r--  drivers/pci/controller/dwc/pcie-stm32.c | 370
-rw-r--r--  drivers/pci/controller/dwc/pcie-stm32.h | 19
-rw-r--r--  drivers/pci/controller/dwc/pcie-tegra194.c | 300
-rw-r--r--  drivers/pci/controller/dwc/pcie-uniphier-ep.c | 45
-rw-r--r--  drivers/pci/controller/dwc/pcie-uniphier.c | 16
-rw-r--r--  drivers/pci/controller/dwc/pcie-visconti.c | 6
-rw-r--r--  drivers/pci/controller/mobiveil/Kconfig | 20
-rw-r--r--  drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c | 14
-rw-r--r--  drivers/pci/controller/mobiveil/pcie-mobiveil-host.c | 61
-rw-r--r--  drivers/pci/controller/mobiveil/pcie-mobiveil-plat.c | 1
-rw-r--r--  drivers/pci/controller/mobiveil/pcie-mobiveil.h | 5
-rw-r--r--  drivers/pci/controller/pci-aardvark.c | 144
-rw-r--r--  drivers/pci/controller/pci-ftpci100.c | 21
-rw-r--r--  drivers/pci/controller/pci-host-common.c | 55
-rw-r--r--  drivers/pci/controller/pci-host-common.h | 23
-rw-r--r--  drivers/pci/controller/pci-host-generic.c | 3
-rw-r--r--  drivers/pci/controller/pci-hyperv-intf.c | 1
-rw-r--r--  drivers/pci/controller/pci-hyperv.c | 758
-rw-r--r--  drivers/pci/controller/pci-ixp4xx.c | 19
-rw-r--r--  drivers/pci/controller/pci-loongson.c | 94
-rw-r--r--  drivers/pci/controller/pci-mvebu.c | 61
-rw-r--r--  drivers/pci/controller/pci-rcar-gen2.c | 3
-rw-r--r--  drivers/pci/controller/pci-tegra.c | 194
-rw-r--r--  drivers/pci/controller/pci-thunder-ecam.c | 4
-rw-r--r--  drivers/pci/controller/pci-thunder-pem.c | 5
-rw-r--r--  drivers/pci/controller/pci-v3-semi.c | 6
-rw-r--r--  drivers/pci/controller/pci-versatile.c | 1
-rw-r--r--  drivers/pci/controller/pci-xgene-msi.c | 484
-rw-r--r--  drivers/pci/controller/pci-xgene.c | 46
-rw-r--r--  drivers/pci/controller/pcie-altera-msi.c | 56
-rw-r--r--  drivers/pci/controller/pcie-altera.c | 279
-rw-r--r--  drivers/pci/controller/pcie-apple.c | 396
-rw-r--r--  drivers/pci/controller/pcie-brcmstb.c | 1061
-rw-r--r--  drivers/pci/controller/pcie-hisi-error.c | 5
-rw-r--r--  drivers/pci/controller/pcie-iproc-msi.c | 51
-rw-r--r--  drivers/pci/controller/pcie-iproc-platform.c | 6
-rw-r--r--  drivers/pci/controller/pcie-iproc.c | 46
-rw-r--r--  drivers/pci/controller/pcie-iproc.h | 2
-rw-r--r--  drivers/pci/controller/pcie-mediatek-gen3.c | 559
-rw-r--r--  drivers/pci/controller/pcie-mediatek.c | 205
-rw-r--r--  drivers/pci/controller/pcie-microchip-host.c | 1140
-rw-r--r--  drivers/pci/controller/pcie-mt7621.c | 28
-rw-r--r--  drivers/pci/controller/pcie-rcar-ep.c | 37
-rw-r--r--  drivers/pci/controller/pcie-rcar-host.c | 185
-rw-r--r--  drivers/pci/controller/pcie-rcar.h | 2
-rw-r--r--  drivers/pci/controller/pcie-rockchip-ep.c | 622
-rw-r--r--  drivers/pci/controller/pcie-rockchip-host.c | 87
-rw-r--r--  drivers/pci/controller/pcie-rockchip.c | 250
-rw-r--r--  drivers/pci/controller/pcie-rockchip.h | 163
-rw-r--r--  drivers/pci/controller/pcie-rzg3s-host.c | 1761
-rw-r--r--  drivers/pci/controller/pcie-xilinx-common.h | 31
-rw-r--r--  drivers/pci/controller/pcie-xilinx-cpm.c | 155
-rw-r--r--  drivers/pci/controller/pcie-xilinx-dma-pl.c | 847
-rw-r--r--  drivers/pci/controller/pcie-xilinx-nwl.c | 266
-rw-r--r--  drivers/pci/controller/pcie-xilinx.c | 68
-rw-r--r--  drivers/pci/controller/plda/Kconfig | 31
-rw-r--r--  drivers/pci/controller/plda/Makefile | 4
-rw-r--r--  drivers/pci/controller/plda/pcie-microchip-host.c | 834
-rw-r--r--  drivers/pci/controller/plda/pcie-plda-host.c | 651
-rw-r--r--  drivers/pci/controller/plda/pcie-plda.h | 274
-rw-r--r--  drivers/pci/controller/plda/pcie-starfive.c | 492
-rw-r--r--  drivers/pci/controller/vmd.c | 485
117 files changed, 22825 insertions, 8004 deletions
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index 1569d9a3ada0..c254d2b8bf17 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -3,17 +3,9 @@
menu "PCI controller drivers"
depends on PCI
-config PCI_MVEBU
- tristate "Marvell EBU PCIe controller"
- depends on ARCH_MVEBU || ARCH_DOVE || COMPILE_TEST
- depends on MVEBU_MBUS
- depends on ARM
- depends on OF
- select PCI_BRIDGE_EMUL
- help
- Add support for Marvell EBU PCIe controller. This PCIe controller
- is used on 32-bit Marvell ARM SoCs: Dove, Kirkwood, Armada 370,
- Armada XP, Armada 375, Armada 38x and Armada 39x.
+config PCI_HOST_COMMON
+ tristate
+ select PCI_ECAM
config PCI_AARDVARK
tristate "Aardvark PCIe controller"
@@ -21,125 +13,63 @@ config PCI_AARDVARK
depends on OF
depends on PCI_MSI
select PCI_BRIDGE_EMUL
+ select IRQ_MSI_LIB
help
Add support for Aardvark 64bit PCIe Host Controller. This
controller is part of the South Bridge of the Marvell Armada
3700 SoC.
-config PCIE_XILINX_NWL
- bool "NWL PCIe Core"
- depends on ARCH_ZYNQMP || COMPILE_TEST
- depends on PCI_MSI
- help
- Say 'Y' here if you want kernel support for Xilinx
- NWL PCIe controller. The controller can act as Root Port
- or End Point. The current option selection will only
- support root port enabling.
-
-config PCI_FTPCI100
- bool "Faraday Technology FTPCI100 PCI controller"
- depends on OF
- default ARCH_GEMINI
-
-config PCI_IXP4XX
- bool "Intel IXP4xx PCI controller"
- depends on ARM && OF
- depends on ARCH_IXP4XX || COMPILE_TEST
- default ARCH_IXP4XX
- help
- Say Y here if you want support for the PCI host controller found
- in the Intel IXP4xx XScale-based network processor SoC.
-
-config PCI_TEGRA
- bool "NVIDIA Tegra PCIe controller"
- depends on ARCH_TEGRA || COMPILE_TEST
- depends on PCI_MSI
- help
- Say Y here if you want support for the PCIe host controller found
- on NVIDIA Tegra SoCs.
-
-config PCI_RCAR_GEN2
- bool "Renesas R-Car Gen2 Internal PCI controller"
- depends on ARCH_RENESAS || COMPILE_TEST
- depends on ARM
+config PCIE_ALTERA
+ tristate "Altera PCIe controller"
+ depends on ARM || NIOS2 || ARM64 || COMPILE_TEST
help
- Say Y here if you want internal PCI support on R-Car Gen2 SoC.
- There are 3 internal PCI controllers available with a single
- built-in EHCI/OHCI host controller present on each one.
+ Say Y here if you want to enable PCIe controller support on Altera
+ FPGA.
-config PCIE_RCAR_HOST
- bool "Renesas R-Car PCIe host controller"
- depends on ARCH_RENESAS || COMPILE_TEST
+config PCIE_ALTERA_MSI
+ tristate "Altera PCIe MSI feature"
+ depends on PCIE_ALTERA
depends on PCI_MSI
+ select IRQ_MSI_LIB
help
- Say Y here if you want PCIe controller support on R-Car SoCs in host
- mode.
-
-config PCIE_RCAR_EP
- bool "Renesas R-Car PCIe endpoint controller"
- depends on ARCH_RENESAS || COMPILE_TEST
- depends on PCI_ENDPOINT
- help
- Say Y here if you want PCIe controller support on R-Car SoCs in
- endpoint mode.
+ Say Y here if you want PCIe MSI support for the Altera FPGA.
+ This MSI driver supports Altera MSI to GIC controller IP.
-config PCI_HOST_COMMON
- tristate
- select PCI_ECAM
+config PCIE_APPLE_MSI_DOORBELL_ADDR
+ hex
+ default 0xfffff000
+ depends on PCIE_APPLE
-config PCI_HOST_GENERIC
- tristate "Generic PCI host controller"
+config PCIE_APPLE
+ tristate "Apple PCIe controller"
+ depends on ARCH_APPLE || COMPILE_TEST
depends on OF
- select PCI_HOST_COMMON
- select IRQ_DOMAIN
- help
- Say Y here if you want to support a simple generic PCI host
- controller, such as the one emulated by kvmtool.
-
-config PCIE_XILINX
- bool "Xilinx AXI PCIe host bridge support"
- depends on OF || COMPILE_TEST
depends on PCI_MSI
- help
- Say 'Y' here if you want kernel to support the Xilinx AXI PCIe
- Host Bridge driver.
-
-config PCIE_XILINX_CPM
- bool "Xilinx Versal CPM host bridge support"
- depends on ARCH_ZYNQMP || COMPILE_TEST
select PCI_HOST_COMMON
+ select IRQ_MSI_LIB
help
- Say 'Y' here if you want kernel support for the
- Xilinx Versal CPM host bridge.
-
-config PCI_XGENE
- bool "X-Gene PCIe controller"
- depends on ARM64 || COMPILE_TEST
- depends on OF || (ACPI && PCI_QUIRKS)
- help
- Say Y here if you want internal PCI support on APM X-Gene SoC.
- There are 5 internal PCIe ports available. Each port is GEN3 capable
- and have varied lanes from x1 to x8.
-
-config PCI_XGENE_MSI
- bool "X-Gene v1 PCIe MSI feature"
- depends on PCI_XGENE
- depends on PCI_MSI
- default y
- help
- Say Y here if you want PCIe MSI support for the APM X-Gene v1 SoC.
- This MSI driver supports 5 PCIe ports on the APM X-Gene v1 SoC.
+ Say Y here if you want to enable PCIe controller support on Apple
+ system-on-chips, like the Apple M1. This is required for the USB
+ type-A ports, Ethernet, Wi-Fi, and Bluetooth.
-config PCI_V3_SEMI
- bool "V3 Semiconductor PCI controller"
- depends on OF
- depends on ARM || COMPILE_TEST
- default ARCH_INTEGRATOR_AP
+ If unsure, say Y if you have an Apple Silicon system.
config PCI_VERSATILE
bool "ARM Versatile PB PCI controller"
depends on ARCH_VERSATILE || COMPILE_TEST
+config PCIE_BRCMSTB
+ tristate "Broadcom Brcmstb PCIe controller"
+ depends on ARCH_BRCMSTB || ARCH_BCM2835 || ARCH_BCMBCA || \
+ BMIPS_GENERIC || COMPILE_TEST
+ depends on OF
+ depends on PCI_MSI
+ select IRQ_MSI_LIB
+ default ARCH_BRCMSTB || BMIPS_GENERIC
+ help
+ Say Y here to enable PCIe host controller support for
+ Broadcom STB based SoCs, like the Raspberry Pi 4.
+
config PCIE_IPROC
tristate
help
@@ -158,7 +88,7 @@ config PCIE_IPROC_PLATFORM
through the generic platform bus interface
config PCIE_IPROC_BCMA
- tristate "Broadcom iProc PCIe BCMA bus driver"
+ tristate "Broadcom iProc BCMA PCIe controller"
depends on ARM && (ARCH_BCM_IPROC || COMPILE_TEST)
select PCIE_IPROC
select BCMA
@@ -171,26 +101,12 @@ config PCIE_IPROC_MSI
bool "Broadcom iProc PCIe MSI support"
depends on PCIE_IPROC_PLATFORM || PCIE_IPROC_BCMA
depends on PCI_MSI
+ select IRQ_MSI_LIB
default ARCH_BCM_IPROC
help
Say Y here if you want to enable MSI support for Broadcom's iProc
PCIe controller
-config PCIE_ALTERA
- tristate "Altera PCIe controller"
- depends on ARM || NIOS2 || ARM64 || COMPILE_TEST
- help
- Say Y here if you want to enable PCIe controller support on Altera
- FPGA.
-
-config PCIE_ALTERA_MSI
- tristate "Altera PCIe MSI feature"
- depends on PCIE_ALTERA
- depends on PCI_MSI
- help
- Say Y here if you want PCIe MSI support for the Altera FPGA.
- This MSI driver supports Altera MSI to GIC controller IP.
-
config PCI_HOST_THUNDER_PEM
bool "Cavium Thunder PCIe controller to off-chip devices"
depends on ARM64 || COMPILE_TEST
@@ -207,47 +123,89 @@ config PCI_HOST_THUNDER_ECAM
help
Say Y here if you want ECAM support for CN88XX-Pass-1.x Cavium Thunder SoCs.
-config PCIE_ROCKCHIP
- bool
- depends on PCI
+config PCI_FTPCI100
+ bool "Faraday Technology FTPCI100 PCI controller"
+ depends on OF
+ default ARCH_GEMINI
-config PCIE_ROCKCHIP_HOST
- tristate "Rockchip PCIe host controller"
- depends on ARCH_ROCKCHIP || COMPILE_TEST
+config PCI_HOST_GENERIC
+ tristate "Generic PCI host controller"
depends on OF
- depends on PCI_MSI
- select MFD_SYSCON
- select PCIE_ROCKCHIP
+ select PCI_HOST_COMMON
+ select IRQ_DOMAIN
help
- Say Y here if you want internal PCI support on Rockchip SoC.
- There is 1 internal PCIe port available to support GEN2 with
- 4 slots.
+ Say Y here if you want to support a simple generic PCI host
+ controller, such as the one emulated by kvmtool.
-config PCIE_ROCKCHIP_EP
- bool "Rockchip PCIe endpoint controller"
- depends on ARCH_ROCKCHIP || COMPILE_TEST
+config PCIE_HISI_ERR
+ depends on ACPI_APEI_GHES && (ARM64 || COMPILE_TEST)
+ bool "HiSilicon HIP PCIe controller error handling driver"
+ help
+ Say Y here if you want error handling support for
+ PCIe controller errors on HiSilicon HIP SoCs.
+
+config PCI_IXP4XX
+ bool "Intel IXP4xx PCI controller"
depends on OF
- depends on PCI_ENDPOINT
- select MFD_SYSCON
- select PCIE_ROCKCHIP
+ depends on ARCH_IXP4XX || COMPILE_TEST
+ default ARCH_IXP4XX
help
- Say Y here if you want to support Rockchip PCIe controller in
- endpoint mode on Rockchip SoC. There is 1 internal PCIe port
- available to support GEN2 with 4 slots.
+ Say Y here if you want support for the PCI host controller found
+ in the Intel IXP4xx XScale-based network processor SoC.
+
+config VMD
+ depends on PCI_MSI && X86_64 && !UML
+ tristate "Intel Volume Management Device Driver"
+ select IRQ_MSI_LIB
+ help
+ Adds support for the Intel Volume Management Device (VMD). VMD is a
+ secondary PCI host bridge that allows PCI Express root ports,
+ and devices attached to them, to be removed from the default
+ PCI domain and placed within the VMD domain. This provides
+ more bus resources than are otherwise possible with a
+ single domain. If you know your system provides one of these and
+ has devices attached to it, say Y; if you are not sure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called vmd.
+
+config PCI_LOONGSON
+ bool "LOONGSON PCIe controller"
+ depends on MACH_LOONGSON64 || COMPILE_TEST
+ depends on OF || ACPI
+ depends on PCI_QUIRKS
+ default MACH_LOONGSON64
+ help
+ Say Y here if you want to enable PCI controller support on
+ Loongson systems.
+
+config PCI_MVEBU
+ tristate "Marvell EBU PCIe controller"
+ depends on ARCH_MVEBU || ARCH_DOVE || COMPILE_TEST
+ depends on MVEBU_MBUS
+ depends on ARM
+ depends on OF
+ select PCI_BRIDGE_EMUL
+ help
+ Add support for Marvell EBU PCIe controller. This PCIe controller
+ is used on 32-bit Marvell ARM SoCs: Dove, Kirkwood, Armada 370,
+ Armada XP, Armada 375, Armada 38x and Armada 39x.
config PCIE_MEDIATEK
tristate "MediaTek PCIe controller"
depends on ARCH_AIROHA || ARCH_MEDIATEK || COMPILE_TEST
depends on OF
depends on PCI_MSI
+ select IRQ_MSI_LIB
help
Say Y here if you want to enable PCIe controller support on
MediaTek SoCs.
config PCIE_MEDIATEK_GEN3
tristate "MediaTek Gen3 PCIe controller"
- depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on ARCH_AIROHA || ARCH_MEDIATEK || COMPILE_TEST
depends on PCI_MSI
+ select IRQ_MSI_LIB
help
Adds support for PCIe Gen3 MAC controller for MediaTek SoCs.
This PCIe controller is compatible with Gen3, Gen2 and Gen1 speed,
@@ -257,91 +215,160 @@ config PCIE_MEDIATEK_GEN3
Say Y here if you want to enable Gen3 PCIe controller support on
MediaTek SoCs.
-config VMD
- depends on PCI_MSI && X86_64 && SRCU && !UML
- tristate "Intel Volume Management Device Driver"
+config PCIE_MT7621
+ tristate "MediaTek MT7621 PCIe controller"
+ depends on SOC_MT7621 || COMPILE_TEST
+ select PHY_MT7621_PCI
+ default SOC_MT7621
help
- Adds support for the Intel Volume Management Device (VMD). VMD is a
- secondary PCI host bridge that allows PCI Express root ports,
- and devices attached to them, to be removed from the default
- PCI domain and placed within the VMD domain. This provides
- more bus resources than are otherwise possible with a
- single domain. If you know your system provides one of these and
- has devices attached to it, say Y; if you are not sure, say N.
+ This selects a driver for the MediaTek MT7621 PCIe Controller.
- To compile this driver as a module, choose M here: the
- module will be called vmd.
+config PCI_HYPERV_INTERFACE
+ tristate "Microsoft Hyper-V PCI Interface"
+ depends on ((X86 && X86_64) || ARM64) && HYPERV && PCI_MSI
+ help
+ The Hyper-V PCI Interface is a helper driver that allows other
+ drivers to have a common interface with the Hyper-V PCI frontend
+ driver.
-config PCIE_BRCMSTB
- tristate "Broadcom Brcmstb PCIe host controller"
- depends on ARCH_BRCMSTB || ARCH_BCM2835 || ARCH_BCMBCA || \
- BMIPS_GENERIC || COMPILE_TEST
- depends on OF
+config PCI_TEGRA
+ bool "NVIDIA Tegra PCIe controller"
+ depends on ARCH_TEGRA || COMPILE_TEST
depends on PCI_MSI
- default ARCH_BRCMSTB || BMIPS_GENERIC
+ select IRQ_MSI_LIB
help
- Say Y here to enable PCIe host controller support for
- Broadcom STB based SoCs, like the Raspberry Pi 4.
+ Say Y here if you want support for the PCIe host controller found
+ on NVIDIA Tegra SoCs.
-config PCI_HYPERV_INTERFACE
- tristate "Hyper-V PCI Interface"
- depends on ((X86 && X86_64) || ARM64) && HYPERV && PCI_MSI && PCI_MSI
+config PCIE_RCAR_HOST
+ bool "Renesas R-Car PCIe controller (host mode)"
+ depends on ARCH_RENESAS || COMPILE_TEST
+ depends on PCI_MSI
+ select IRQ_MSI_LIB
help
- The Hyper-V PCI Interface is a helper driver allows other drivers to
- have a common interface with the Hyper-V PCI frontend driver.
+ Say Y here if you want PCIe controller support on R-Car SoCs in host
+ mode.
-config PCI_LOONGSON
- bool "LOONGSON PCI Controller"
- depends on MACH_LOONGSON64 || COMPILE_TEST
- depends on OF || ACPI
- depends on PCI_QUIRKS
- default MACH_LOONGSON64
+config PCIE_RCAR_EP
+ bool "Renesas R-Car PCIe controller (endpoint mode)"
+ depends on ARCH_RENESAS || COMPILE_TEST
+ depends on PCI_ENDPOINT
help
- Say Y here if you want to enable PCI controller support on
- Loongson systems.
+ Say Y here if you want PCIe controller support on R-Car SoCs in
+ endpoint mode.
-config PCIE_MICROCHIP_HOST
- bool "Microchip AXI PCIe host bridge support"
- depends on PCI_MSI && OF
- select PCI_HOST_COMMON
+config PCI_RCAR_GEN2
+ bool "Renesas R-Car Gen2 Internal PCI controller"
+ depends on (ARCH_RENESAS && ARM) || COMPILE_TEST
help
- Say Y here if you want kernel to support the Microchip AXI PCIe
- Host Bridge driver.
+ Say Y here if you want internal PCI support on R-Car Gen2 SoC.
+ Each internal PCI controller contains a single built-in EHCI/OHCI
+ host controller.
-config PCIE_HISI_ERR
- depends on ACPI_APEI_GHES && (ARM64 || COMPILE_TEST)
- bool "HiSilicon HIP PCIe controller error handling driver"
+config PCIE_RENESAS_RZG3S_HOST
+ bool "Renesas RZ/G3S PCIe host controller"
+ depends on ARCH_RENESAS || COMPILE_TEST
+ select MFD_SYSCON
+ select IRQ_MSI_LIB
help
- Say Y here if you want error handling support
- for the PCIe controller's errors on HiSilicon HIP SoCs
+ Say Y here if you want PCIe host controller support on Renesas RZ/G3S
+ SoC.
-config PCIE_APPLE_MSI_DOORBELL_ADDR
- hex
- default 0xfffff000
- depends on PCIE_APPLE
+config PCIE_ROCKCHIP
+ bool
+ depends on PCI
-config PCIE_APPLE
- tristate "Apple PCIe controller"
- depends on ARCH_APPLE || COMPILE_TEST
+config PCIE_ROCKCHIP_HOST
+ tristate "Rockchip PCIe controller (host mode)"
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
depends on OF
depends on PCI_MSI
+ select MFD_SYSCON
+ select PCIE_ROCKCHIP
+ help
+ Say Y here if you want internal PCI support on Rockchip SoC.
+ There is 1 internal PCIe port available to support GEN2 with
+ 4 slots.
+
+config PCIE_ROCKCHIP_EP
+ bool "Rockchip PCIe controller (endpoint mode)"
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
+ depends on OF
+ depends on PCI_ENDPOINT
+ select MFD_SYSCON
+ select PCIE_ROCKCHIP
+ help
+ Say Y here if you want to support Rockchip PCIe controller in
+ endpoint mode on Rockchip SoC. There is 1 internal PCIe port
+ available to support GEN2 with 4 slots.
+
+config PCI_V3_SEMI
+ bool "V3 Semiconductor PCI controller"
+ depends on OF
+ depends on ARM || COMPILE_TEST
+ default ARCH_INTEGRATOR_AP
+
+config PCI_XGENE
+ bool "X-Gene PCIe controller"
+ depends on ARM64 || COMPILE_TEST
+ depends on OF || (ACPI && PCI_QUIRKS)
+ help
+ Say Y here if you want internal PCI support on APM X-Gene SoC.
+ There are 5 internal PCIe ports available. Each port is GEN3 capable
+ and supports x1 to x8 lanes.
+
+config PCI_XGENE_MSI
+ bool "X-Gene v1 PCIe MSI feature"
+ depends on PCI_XGENE
+ depends on PCI_MSI
+ select IRQ_MSI_LIB
+ default y
+ help
+ Say Y here if you want PCIe MSI support for the APM X-Gene v1 SoC.
+ This MSI driver supports 5 PCIe ports on the APM X-Gene v1 SoC.
+
+config PCIE_XILINX
+ bool "Xilinx AXI PCIe controller"
+ depends on OF
+ depends on PCI_MSI
+ select IRQ_MSI_LIB
+ help
+ Say 'Y' here if you want kernel to support the Xilinx AXI PCIe
+ Host Bridge driver.
+
+config PCIE_XILINX_DMA_PL
+ bool "Xilinx DMA PL PCIe host bridge support"
+ depends on ARCH_ZYNQMP || COMPILE_TEST
+ depends on PCI_MSI
select PCI_HOST_COMMON
+ select IRQ_MSI_LIB
help
- Say Y here if you want to enable PCIe controller support on Apple
- system-on-chips, like the Apple M1. This is required for the USB
- type-A ports, Ethernet, Wi-Fi, and Bluetooth.
+ Say 'Y' here if you want kernel support for the Xilinx PL DMA
+ PCIe host bridge. The controller is a Soft IP which can act as
+ Root Port. If your system provides Xilinx PCIe host controller
+ bridge DMA as Soft IP say 'Y'; if you are not sure, say 'N'.
- If unsure, say Y if you have an Apple Silicon system.
+config PCIE_XILINX_NWL
+ bool "Xilinx NWL PCIe controller"
+ depends on ARCH_ZYNQMP || COMPILE_TEST
+ depends on PCI_MSI
+ select IRQ_MSI_LIB
+ help
+ Say 'Y' here if you want kernel support for Xilinx
+ NWL PCIe controller. The controller can act as Root Port
+ or End Point. The current option selection will only
+ support root port enabling.
-config PCIE_MT7621
- tristate "MediaTek MT7621 PCIe Controller"
- depends on SOC_MT7621 || COMPILE_TEST
- select PHY_MT7621_PCI
- default SOC_MT7621
+config PCIE_XILINX_CPM
+ bool "Xilinx Versal CPM PCI controller"
+ depends on ARCH_ZYNQMP || COMPILE_TEST
+ select PCI_HOST_COMMON
help
- This selects a driver for the MediaTek MT7621 PCIe Controller.
+ Say 'Y' here if you want kernel support for the
+ Xilinx Versal CPM host bridge.
+source "drivers/pci/controller/cadence/Kconfig"
source "drivers/pci/controller/dwc/Kconfig"
source "drivers/pci/controller/mobiveil/Kconfig"
-source "drivers/pci/controller/cadence/Kconfig"
+source "drivers/pci/controller/plda/Kconfig"
endmenu
diff --git a/drivers/pci/controller/Makefile b/drivers/pci/controller/Makefile
index 37c8663de7fe..229929a945c2 100644
--- a/drivers/pci/controller/Makefile
+++ b/drivers/pci/controller/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_PCI_TEGRA) += pci-tegra.o
obj-$(CONFIG_PCI_RCAR_GEN2) += pci-rcar-gen2.o
obj-$(CONFIG_PCIE_RCAR_HOST) += pcie-rcar.o pcie-rcar-host.o
obj-$(CONFIG_PCIE_RCAR_EP) += pcie-rcar.o pcie-rcar-ep.o
+obj-$(CONFIG_PCIE_RENESAS_RZG3S_HOST) += pcie-rzg3s-host.o
obj-$(CONFIG_PCI_HOST_COMMON) += pci-host-common.o
obj-$(CONFIG_PCI_HOST_GENERIC) += pci-host-generic.o
obj-$(CONFIG_PCI_HOST_THUNDER_ECAM) += pci-thunder-ecam.o
@@ -17,6 +18,7 @@ obj-$(CONFIG_PCI_HOST_THUNDER_PEM) += pci-thunder-pem.o
obj-$(CONFIG_PCIE_XILINX) += pcie-xilinx.o
obj-$(CONFIG_PCIE_XILINX_NWL) += pcie-xilinx-nwl.o
obj-$(CONFIG_PCIE_XILINX_CPM) += pcie-xilinx-cpm.o
+obj-$(CONFIG_PCIE_XILINX_DMA_PL) += pcie-xilinx-dma-pl.o
obj-$(CONFIG_PCI_V3_SEMI) += pci-v3-semi.o
obj-$(CONFIG_PCI_XGENE) += pci-xgene.o
obj-$(CONFIG_PCI_XGENE_MSI) += pci-xgene-msi.o
@@ -32,7 +34,6 @@ obj-$(CONFIG_PCIE_ROCKCHIP_EP) += pcie-rockchip-ep.o
obj-$(CONFIG_PCIE_ROCKCHIP_HOST) += pcie-rockchip-host.o
obj-$(CONFIG_PCIE_MEDIATEK) += pcie-mediatek.o
obj-$(CONFIG_PCIE_MEDIATEK_GEN3) += pcie-mediatek-gen3.o
-obj-$(CONFIG_PCIE_MICROCHIP_HOST) += pcie-microchip-host.o
obj-$(CONFIG_VMD) += vmd.o
obj-$(CONFIG_PCIE_BRCMSTB) += pcie-brcmstb.o
obj-$(CONFIG_PCI_LOONGSON) += pci-loongson.o
@@ -43,6 +44,7 @@ obj-$(CONFIG_PCIE_MT7621) += pcie-mt7621.o
# pcie-hisi.o quirks are needed even without CONFIG_PCIE_DW
obj-y += dwc/
obj-y += mobiveil/
+obj-y += plda/
# The following drivers are for devices that use the generic ACPI
diff --git a/drivers/pci/controller/cadence/Kconfig b/drivers/pci/controller/cadence/Kconfig
index 5d30564190e1..9e651d545973 100644
--- a/drivers/pci/controller/cadence/Kconfig
+++ b/drivers/pci/controller/cadence/Kconfig
@@ -1,28 +1,28 @@
# SPDX-License-Identifier: GPL-2.0
-menu "Cadence PCIe controllers support"
+menu "Cadence-based PCIe controllers"
depends on PCI
config PCIE_CADENCE
- bool
+ tristate
config PCIE_CADENCE_HOST
- bool
+ tristate
depends on OF
select IRQ_DOMAIN
select PCIE_CADENCE
config PCIE_CADENCE_EP
- bool
+ tristate
depends on OF
depends on PCI_ENDPOINT
select PCIE_CADENCE
config PCIE_CADENCE_PLAT
- bool
+ tristate
config PCIE_CADENCE_PLAT_HOST
- bool "Cadence PCIe platform host controller"
+ tristate "Cadence platform PCIe controller (host mode)"
depends on OF
select PCIE_CADENCE_HOST
select PCIE_CADENCE_PLAT
@@ -32,23 +32,49 @@ config PCIE_CADENCE_PLAT_HOST
vendors' SoCs.
config PCIE_CADENCE_PLAT_EP
- bool "Cadence PCIe platform endpoint controller"
+ tristate "Cadence platform PCIe controller (endpoint mode)"
depends on OF
depends on PCI_ENDPOINT
select PCIE_CADENCE_EP
select PCIE_CADENCE_PLAT
help
- Say Y here if you want to support the Cadence PCIe platform controller in
+ Say Y here if you want to support the Cadence PCIe platform controller in
endpoint mode. This PCIe controller may be embedded into many
different vendors' SoCs.
+config PCI_SKY1_HOST
+ tristate "CIX SKY1 PCIe controller (host mode)"
+ depends on OF && (ARCH_CIX || COMPILE_TEST)
+ select PCIE_CADENCE_HOST
+ select PCI_ECAM
+ help
+ Say Y here if you want to support the CIX SKY1 PCIe platform
+ controller in host mode. The CIX SKY1 PCIe controller uses the
+ Cadence HPA (High Performance Architecture) IP, the second
+ generation of the Cadence PCIe IP.
+
+ This driver requires Cadence PCIe core infrastructure
+ (PCIE_CADENCE_HOST) and hardware platform adaptation layer
+ to function.
+
+config PCIE_SG2042_HOST
+ tristate "Sophgo SG2042 PCIe controller (host mode)"
+ depends on OF && (ARCH_SOPHGO || COMPILE_TEST)
+ select PCIE_CADENCE_HOST
+ help
+ Say Y here if you want to support the Sophgo SG2042 PCIe platform
+ controller in host mode. Sophgo SG2042 PCIe controller uses Cadence
+ PCIe core.
+
config PCI_J721E
- bool
+ tristate
+ select PCIE_CADENCE_HOST if PCI_J721E_HOST != n
+ select PCIE_CADENCE_EP if PCI_J721E_EP != n
config PCI_J721E_HOST
- bool "TI J721E PCIe platform host controller"
+ tristate "TI J721E PCIe controller (host mode)"
+ depends on ARCH_K3 || COMPILE_TEST
depends on OF
- select PCIE_CADENCE_HOST
select PCI_J721E
help
Say Y here if you want to support the TI J721E PCIe platform
@@ -56,13 +82,14 @@ config PCI_J721E_HOST
core.
config PCI_J721E_EP
- bool "TI J721E PCIe platform endpoint controller"
+ tristate "TI J721E PCIe controller (endpoint mode)"
+ depends on ARCH_K3 || COMPILE_TEST
depends on OF
depends on PCI_ENDPOINT
- select PCIE_CADENCE_EP
select PCI_J721E
help
Say Y here if you want to support the TI J721E PCIe platform
controller in endpoint mode. TI J721E PCIe controller uses Cadence PCIe
core.
+
endmenu
diff --git a/drivers/pci/controller/cadence/Makefile b/drivers/pci/controller/cadence/Makefile
index 9bac5fb2f13d..b8ec1cecfaa8 100644
--- a/drivers/pci/controller/cadence/Makefile
+++ b/drivers/pci/controller/cadence/Makefile
@@ -1,6 +1,12 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_PCIE_CADENCE) += pcie-cadence.o
-obj-$(CONFIG_PCIE_CADENCE_HOST) += pcie-cadence-host.o
-obj-$(CONFIG_PCIE_CADENCE_EP) += pcie-cadence-ep.o
+pcie-cadence-mod-y := pcie-cadence-hpa.o pcie-cadence.o
+pcie-cadence-host-mod-y := pcie-cadence-host-common.o pcie-cadence-host.o pcie-cadence-host-hpa.o
+pcie-cadence-ep-mod-y := pcie-cadence-ep.o
+
+obj-$(CONFIG_PCIE_CADENCE) += pcie-cadence-mod.o
+obj-$(CONFIG_PCIE_CADENCE_HOST) += pcie-cadence-host-mod.o
+obj-$(CONFIG_PCIE_CADENCE_EP) += pcie-cadence-ep-mod.o
obj-$(CONFIG_PCIE_CADENCE_PLAT) += pcie-cadence-plat.o
obj-$(CONFIG_PCI_J721E) += pci-j721e.o
+obj-$(CONFIG_PCIE_SG2042_HOST) += pcie-sg2042.o
+obj-$(CONFIG_PCI_SKY1_HOST) += pci-sky1.o
diff --git a/drivers/pci/controller/cadence/pci-j721e.c b/drivers/pci/controller/cadence/pci-j721e.c
index cc83a8925ce0..ecd1b0312400 100644
--- a/drivers/pci/controller/cadence/pci-j721e.c
+++ b/drivers/pci/controller/cadence/pci-j721e.c
@@ -7,22 +7,28 @@
*/
#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/container_of.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/io.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/mfd/syscon.h>
+#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/pci.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include "../../pci.h"
#include "pcie-cadence.h"
+#define cdns_pcie_to_rc(p) container_of(p, struct cdns_pcie_rc, pcie)
+
#define ENABLE_REG_SYS_2 0x108
+#define ENABLE_CLR_REG_SYS_2 0x308
#define STATUS_REG_SYS_2 0x508
#define STATUS_CLR_REG_SYS_2 0x708
#define LINK_DOWN BIT(1)
@@ -42,18 +48,18 @@ enum link_status {
};
#define J721E_MODE_RC BIT(7)
-#define LANE_COUNT_MASK BIT(8)
#define LANE_COUNT(n) ((n) << 8)
+#define ACSPCIE_PAD_DISABLE_MASK GENMASK(1, 0)
#define GENERATION_SEL_MASK GENMASK(1, 0)
-#define MAX_LANES 2
-
struct j721e_pcie {
struct cdns_pcie *cdns_pcie;
struct clk *refclk;
u32 mode;
u32 num_lanes;
+ u32 max_lanes;
+ struct gpio_desc *reset_gpio;
void __iomem *user_cfg_base;
void __iomem *intd_cfg_base;
u32 linkdown_irq_regfield;
@@ -71,6 +77,7 @@ struct j721e_pcie_data {
unsigned int quirk_disable_flr:1;
u32 linkdown_irq_regfield;
unsigned int byte_access_allowed:1;
+ unsigned int max_lanes;
};
static inline u32 j721e_pcie_user_readl(struct j721e_pcie *pcie, u32 offset)
@@ -111,6 +118,15 @@ static irqreturn_t j721e_pcie_link_irq_handler(int irq, void *priv)
return IRQ_HANDLED;
}
+static void j721e_pcie_disable_link_irq(struct j721e_pcie *pcie)
+{
+ u32 reg;
+
+ reg = j721e_pcie_intd_readl(pcie, ENABLE_CLR_REG_SYS_2);
+ reg |= pcie->linkdown_irq_regfield;
+ j721e_pcie_intd_writel(pcie, ENABLE_CLR_REG_SYS_2, reg);
+}
+
static void j721e_pcie_config_link_irq(struct j721e_pcie *pcie)
{
u32 reg;
@@ -148,11 +164,7 @@ static bool j721e_pcie_link_up(struct cdns_pcie *cdns_pcie)
u32 reg;
reg = j721e_pcie_user_readl(pcie, J721E_PCIE_USER_LINKSTATUS);
- reg &= LINK_STATUS;
- if (reg == LINK_UP_DL_COMPLETED)
- return true;
-
- return false;
+ return (reg & LINK_STATUS) == LINK_UP_DL_COMPLETED;
}
static const struct cdns_pcie_ops j721e_pcie_ops = {
@@ -206,17 +218,51 @@ static int j721e_pcie_set_lane_count(struct j721e_pcie *pcie,
{
struct device *dev = pcie->cdns_pcie->dev;
u32 lanes = pcie->num_lanes;
+ u32 mask = BIT(8);
u32 val = 0;
int ret;
+ if (pcie->max_lanes == 4)
+ mask = GENMASK(9, 8);
+
val = LANE_COUNT(lanes - 1);
- ret = regmap_update_bits(syscon, offset, LANE_COUNT_MASK, val);
+ ret = regmap_update_bits(syscon, offset, mask, val);
if (ret)
dev_err(dev, "failed to set link count\n");
return ret;
}
+static int j721e_enable_acspcie_refclk(struct j721e_pcie *pcie,
+ struct regmap *syscon)
+{
+ struct device *dev = pcie->cdns_pcie->dev;
+ struct device_node *node = dev->of_node;
+ u32 mask = ACSPCIE_PAD_DISABLE_MASK;
+ struct of_phandle_args args;
+ u32 val;
+ int ret;
+
+ ret = of_parse_phandle_with_fixed_args(node,
+ "ti,syscon-acspcie-proxy-ctrl",
+ 1, 0, &args);
+ if (ret) {
+ dev_err(dev,
+ "ti,syscon-acspcie-proxy-ctrl has invalid arguments\n");
+ return ret;
+ }
+
+ /* Clear PAD IO disable bits to enable refclk output */
+ val = ~(args.args[0]);
+ ret = regmap_update_bits(syscon, 0, mask, val);
+ if (ret) {
+ dev_err(dev, "failed to enable ACSPCIE refclk: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
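A quick aside on the read-modify-write in j721e_enable_acspcie_refclk() above: regmap_update_bits() only modifies the bits selected by the mask, leaving the rest of the register untouched. A minimal, self-contained sketch of that bit math with hypothetical values (plain C, not the kernel's regmap implementation):

#include <stdint.h>
#include <stdio.h>

/* The update rule regmap_update_bits() applies: only masked bits change. */
static uint32_t update_bits(uint32_t old, uint32_t mask, uint32_t val)
{
	return (old & ~mask) | (val & mask);
}

int main(void)
{
	uint32_t mask = 0x3;	/* ACSPCIE_PAD_DISABLE_MASK, GENMASK(1, 0) */
	uint32_t strap = 0x1;	/* hypothetical args.args[0] from the DT phandle */
	uint32_t old = 0x3;	/* assume both PAD IO disable bits set at reset */

	/* val = ~strap clears the requested disable bit(s) to enable refclk */
	printf("new reg = 0x%x\n", update_bits(old, mask, ~strap));	/* 0x2 */
	return 0;
}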
static int j721e_pcie_ctrl_init(struct j721e_pcie *pcie)
{
struct device *dev = pcie->cdns_pcie->dev;
@@ -238,6 +284,25 @@ static int j721e_pcie_ctrl_init(struct j721e_pcie *pcie)
if (!ret)
offset = args.args[0];
+ /*
+ * The PCIe Controller's registers have different "reset-values"
+ * depending on the "strap" settings programmed into the PCIEn_CTRL
+ * register within the CTRL_MMR memory-mapped register space.
+ * The registers latch onto a "reset-value" based on the "strap"
+ * settings sampled after the PCIe Controller is powered on.
+ * To ensure that the "reset-values" are sampled accurately, power
+ * off the PCIe Controller before programming the "strap" settings
+ and power it on after that. The runtime PM APIs, namely
+ pm_runtime_put_sync() and pm_runtime_get_sync(), will decrement and
+ increment the usage counter respectively, causing GENPD to power off
+ and power on the PCIe Controller.
+ */
+ ret = pm_runtime_put_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "Failed to power off PCIe Controller\n");
+ return ret;
+ }
+
ret = j721e_pcie_set_mode(pcie, syscon, offset);
if (ret < 0) {
dev_err(dev, "Failed to set pci mode\n");
@@ -256,7 +321,19 @@ static int j721e_pcie_ctrl_init(struct j721e_pcie *pcie)
return ret;
}
- return 0;
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "Failed to power on PCIe Controller\n");
+ return ret;
+ }
+
+ /* Enable ACSPCIE refclk output if the optional property exists */
+ syscon = syscon_regmap_lookup_by_phandle_optional(node,
+ "ti,syscon-acspcie-proxy-ctrl");
+ if (!syscon)
+ return 0;
+
+ return j721e_enable_acspcie_refclk(pcie, syscon);
}
static int cdns_ti_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
@@ -290,11 +367,13 @@ static const struct j721e_pcie_data j721e_pcie_rc_data = {
.quirk_retrain_flag = true,
.byte_access_allowed = false,
.linkdown_irq_regfield = LINK_DOWN,
+ .max_lanes = 2,
};
static const struct j721e_pcie_data j721e_pcie_ep_data = {
.mode = PCI_MODE_EP,
.linkdown_irq_regfield = LINK_DOWN,
+ .max_lanes = 2,
};
static const struct j721e_pcie_data j7200_pcie_rc_data = {
@@ -302,23 +381,49 @@ static const struct j721e_pcie_data j7200_pcie_rc_data = {
.quirk_detect_quiet_flag = true,
.linkdown_irq_regfield = J7200_LINK_DOWN,
.byte_access_allowed = true,
+ .max_lanes = 2,
};
static const struct j721e_pcie_data j7200_pcie_ep_data = {
.mode = PCI_MODE_EP,
.quirk_detect_quiet_flag = true,
+ .linkdown_irq_regfield = J7200_LINK_DOWN,
.quirk_disable_flr = true,
+ .max_lanes = 2,
};
static const struct j721e_pcie_data am64_pcie_rc_data = {
.mode = PCI_MODE_RC,
.linkdown_irq_regfield = J7200_LINK_DOWN,
.byte_access_allowed = true,
+ .max_lanes = 1,
};
static const struct j721e_pcie_data am64_pcie_ep_data = {
.mode = PCI_MODE_EP,
.linkdown_irq_regfield = J7200_LINK_DOWN,
+ .max_lanes = 1,
+};
+
+static const struct j721e_pcie_data j784s4_pcie_rc_data = {
+ .mode = PCI_MODE_RC,
+ .quirk_retrain_flag = true,
+ .byte_access_allowed = false,
+ .linkdown_irq_regfield = J7200_LINK_DOWN,
+ .max_lanes = 4,
+};
+
+static const struct j721e_pcie_data j784s4_pcie_ep_data = {
+ .mode = PCI_MODE_EP,
+ .linkdown_irq_regfield = J7200_LINK_DOWN,
+ .max_lanes = 4,
+};
+
+static const struct j721e_pcie_data j722s_pcie_rc_data = {
+ .mode = PCI_MODE_RC,
+ .linkdown_irq_regfield = J7200_LINK_DOWN,
+ .byte_access_allowed = true,
+ .max_lanes = 1,
};
static const struct of_device_id of_j721e_pcie_match[] = {
@@ -346,8 +451,21 @@ static const struct of_device_id of_j721e_pcie_match[] = {
.compatible = "ti,am64-pcie-ep",
.data = &am64_pcie_ep_data,
},
+ {
+ .compatible = "ti,j784s4-pcie-host",
+ .data = &j784s4_pcie_rc_data,
+ },
+ {
+ .compatible = "ti,j784s4-pcie-ep",
+ .data = &j784s4_pcie_ep_data,
+ },
+ {
+ .compatible = "ti,j722s-pcie-host",
+ .data = &j722s_pcie_rc_data,
+ },
{},
};
+MODULE_DEVICE_TABLE(of, of_j721e_pcie_match);
static int j721e_pcie_probe(struct platform_device *pdev)
{
@@ -359,9 +477,7 @@ static int j721e_pcie_probe(struct platform_device *pdev)
struct j721e_pcie *pcie;
struct cdns_pcie_rc *rc = NULL;
struct cdns_pcie_ep *ep = NULL;
- struct gpio_desc *gpiod;
void __iomem *base;
- struct clk *clk;
u32 num_lanes;
u32 mode;
int ret;
@@ -379,7 +495,7 @@ static int j721e_pcie_probe(struct platform_device *pdev)
switch (mode) {
case PCI_MODE_RC:
- if (!IS_ENABLED(CONFIG_PCIE_CADENCE_HOST))
+ if (!IS_ENABLED(CONFIG_PCI_J721E_HOST))
return -ENODEV;
bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rc));
@@ -398,7 +514,7 @@ static int j721e_pcie_probe(struct platform_device *pdev)
pcie->cdns_pcie = cdns_pcie;
break;
case PCI_MODE_EP:
- if (!IS_ENABLED(CONFIG_PCIE_CADENCE_EP))
+ if (!IS_ENABLED(CONFIG_PCI_J721E_EP))
return -ENODEV;
ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
@@ -432,9 +548,13 @@ static int j721e_pcie_probe(struct platform_device *pdev)
pcie->user_cfg_base = base;
ret = of_property_read_u32(node, "num-lanes", &num_lanes);
- if (ret || num_lanes > MAX_LANES)
+ if (ret || num_lanes > data->max_lanes) {
+ dev_warn(dev, "num-lanes property not provided or invalid, setting num-lanes to 1\n");
num_lanes = 1;
+ }
+
pcie->num_lanes = num_lanes;
+ pcie->max_lanes = data->max_lanes;
if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)))
return -EINVAL;
@@ -447,20 +567,20 @@ static int j721e_pcie_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
- dev_err(dev, "pm_runtime_get_sync failed\n");
+ dev_err_probe(dev, ret, "pm_runtime_get_sync failed\n");
goto err_get_sync;
}
ret = j721e_pcie_ctrl_init(pcie);
if (ret < 0) {
- dev_err(dev, "pm_runtime_get_sync failed\n");
+ dev_err_probe(dev, ret, "j721e_pcie_ctrl_init failed\n");
goto err_get_sync;
}
ret = devm_request_irq(dev, irq, j721e_pcie_link_irq_handler, 0,
"j721e-pcie-link-down-irq", pcie);
if (ret < 0) {
- dev_err(dev, "failed to request link state IRQ %d\n", irq);
+ dev_err_probe(dev, ret, "failed to request link state IRQ %d\n", irq);
goto err_get_sync;
}
@@ -468,58 +588,47 @@ static int j721e_pcie_probe(struct platform_device *pdev)
switch (mode) {
case PCI_MODE_RC:
- gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
- if (IS_ERR(gpiod)) {
- ret = PTR_ERR(gpiod);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Failed to get reset GPIO\n");
+ pcie->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(pcie->reset_gpio)) {
+ ret = dev_err_probe(dev, PTR_ERR(pcie->reset_gpio),
+ "Failed to get reset GPIO\n");
goto err_get_sync;
}
ret = cdns_pcie_init_phy(dev, cdns_pcie);
if (ret) {
- dev_err(dev, "Failed to init phy\n");
+ dev_err_probe(dev, ret, "Failed to init phy\n");
goto err_get_sync;
}
- clk = devm_clk_get_optional(dev, "pcie_refclk");
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- dev_err(dev, "failed to get pcie_refclk\n");
- goto err_pcie_setup;
- }
-
- ret = clk_prepare_enable(clk);
- if (ret) {
- dev_err(dev, "failed to enable pcie_refclk\n");
+ pcie->refclk = devm_clk_get_optional_enabled(dev, "pcie_refclk");
+ if (IS_ERR(pcie->refclk)) {
+ ret = dev_err_probe(dev, PTR_ERR(pcie->refclk),
+ "failed to enable pcie_refclk\n");
goto err_pcie_setup;
}
- pcie->refclk = clk;
/*
- * "Power Sequencing and Reset Signal Timings" table in
- * PCI EXPRESS CARD ELECTROMECHANICAL SPECIFICATION, REV. 3.0
- * indicates PERST# should be deasserted after minimum of 100us
- * once REFCLK is stable. The REFCLK to the connector in RC
- * mode is selected while enabling the PHY. So deassert PERST#
- * after 100 us.
+ * Section 2.2 of the PCI Express Card Electromechanical
+ * Specification (Revision 5.1) mandates that the deassertion
+ * of the PERST# signal should be delayed by 100 ms (TPVPERL).
+ * This shall ensure that the power and the reference clock
+ * are stable.
*/
- if (gpiod) {
- usleep_range(100, 200);
- gpiod_set_value_cansleep(gpiod, 1);
+ if (pcie->reset_gpio) {
+ msleep(PCIE_T_PVPERL_MS);
+ gpiod_set_value_cansleep(pcie->reset_gpio, 1);
}
ret = cdns_pcie_host_setup(rc);
- if (ret < 0) {
- clk_disable_unprepare(pcie->refclk);
+ if (ret < 0)
goto err_pcie_setup;
- }
break;
case PCI_MODE_EP:
ret = cdns_pcie_init_phy(dev, cdns_pcie);
if (ret) {
- dev_err(dev, "Failed to init phy\n");
+ dev_err_probe(dev, ret, "Failed to init phy\n");
goto err_get_sync;
}
@@ -542,20 +651,110 @@ err_get_sync:
return ret;
}
-static int j721e_pcie_remove(struct platform_device *pdev)
+static void j721e_pcie_remove(struct platform_device *pdev)
{
struct j721e_pcie *pcie = platform_get_drvdata(pdev);
struct cdns_pcie *cdns_pcie = pcie->cdns_pcie;
struct device *dev = &pdev->dev;
+ struct cdns_pcie_ep *ep;
+ struct cdns_pcie_rc *rc;
+
+ if (pcie->mode == PCI_MODE_RC) {
+ rc = container_of(cdns_pcie, struct cdns_pcie_rc, pcie);
+ cdns_pcie_host_disable(rc);
+ } else {
+ ep = container_of(cdns_pcie, struct cdns_pcie_ep, pcie);
+ cdns_pcie_ep_disable(ep);
+ }
+
+ gpiod_set_value_cansleep(pcie->reset_gpio, 0);
- clk_disable_unprepare(pcie->refclk);
cdns_pcie_disable_phy(cdns_pcie);
+ j721e_pcie_disable_link_irq(pcie);
pm_runtime_put(dev);
pm_runtime_disable(dev);
+}
+
+static int j721e_pcie_suspend_noirq(struct device *dev)
+{
+ struct j721e_pcie *pcie = dev_get_drvdata(dev);
+
+ if (pcie->mode == PCI_MODE_RC) {
+ gpiod_set_value_cansleep(pcie->reset_gpio, 0);
+ clk_disable_unprepare(pcie->refclk);
+ }
+
+ cdns_pcie_disable_phy(pcie->cdns_pcie);
+
+ return 0;
+}
+
+static int j721e_pcie_resume_noirq(struct device *dev)
+{
+ struct j721e_pcie *pcie = dev_get_drvdata(dev);
+ struct cdns_pcie *cdns_pcie = pcie->cdns_pcie;
+ int ret;
+
+ ret = j721e_pcie_ctrl_init(pcie);
+ if (ret < 0)
+ return ret;
+
+ j721e_pcie_config_link_irq(pcie);
+
+ /*
+ * This is not called explicitly in the probe; it is called by
+ * cdns_pcie_init_phy().
+ */
+ ret = cdns_pcie_enable_phy(pcie->cdns_pcie);
+ if (ret < 0)
+ return ret;
+
+ if (pcie->mode == PCI_MODE_RC) {
+ struct cdns_pcie_rc *rc = cdns_pcie_to_rc(cdns_pcie);
+
+ ret = clk_prepare_enable(pcie->refclk);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Section 2.2 of the PCI Express Card Electromechanical
+ * Specification (Revision 5.1) mandates that the deassertion
+ * of the PERST# signal should be delayed by 100 ms (TPVPERL).
+ * This shall ensure that the power and the reference clock
+ * are stable.
+ */
+ if (pcie->reset_gpio) {
+ msleep(PCIE_T_PVPERL_MS);
+ gpiod_set_value_cansleep(pcie->reset_gpio, 1);
+ }
+
+ ret = cdns_pcie_host_link_setup(rc);
+ if (ret < 0) {
+ clk_disable_unprepare(pcie->refclk);
+ return ret;
+ }
+
+ /*
+ * Reset internal status of BARs to force reinitialization in
+ * cdns_pcie_host_init().
+ */
+ for (enum cdns_pcie_rp_bar bar = RP_BAR0; bar <= RP_NO_BAR; bar++)
+ rc->avail_ib_bar[bar] = true;
+
+ ret = cdns_pcie_host_init(rc);
+ if (ret) {
+ clk_disable_unprepare(pcie->refclk);
+ return ret;
+ }
+ }
return 0;
}
+static DEFINE_NOIRQ_DEV_PM_OPS(j721e_pcie_pm_ops,
+ j721e_pcie_suspend_noirq,
+ j721e_pcie_resume_noirq);
+
static struct platform_driver j721e_pcie_driver = {
.probe = j721e_pcie_probe,
.remove = j721e_pcie_remove,
@@ -563,6 +762,11 @@ static struct platform_driver j721e_pcie_driver = {
.name = "j721e-pcie",
.of_match_table = of_j721e_pcie_match,
.suppress_bind_attrs = true,
+ .pm = pm_sleep_ptr(&j721e_pcie_pm_ops),
},
};
-builtin_platform_driver(j721e_pcie_driver);
+module_platform_driver(j721e_pcie_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("PCIe controller driver for TI's J721E and related SoCs");
+MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
diff --git a/drivers/pci/controller/cadence/pci-sky1.c b/drivers/pci/controller/cadence/pci-sky1.c
new file mode 100644
index 000000000000..d8c216dc120d
--- /dev/null
+++ b/drivers/pci/controller/cadence/pci-sky1.c
@@ -0,0 +1,238 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe controller driver for CIX's sky1 SoCs
+ *
+ * Copyright 2025 Cix Technology Group Co., Ltd.
+ * Author: Hans Zhang <hans.zhang@cixtech.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pci.h>
+#include <linux/pci-ecam.h>
+#include <linux/pci_ids.h>
+
+#include "pcie-cadence.h"
+#include "pcie-cadence-host-common.h"
+
+#define PCI_VENDOR_ID_CIX 0x1f6c
+#define PCI_DEVICE_ID_CIX_SKY1 0x0001
+
+#define STRAP_REG(n) ((n) * 0x04)
+#define STATUS_REG(n) ((n) * 0x04)
+#define LINK_TRAINING_ENABLE BIT(0)
+#define LINK_COMPLETE BIT(0)
+
+#define SKY1_IP_REG_BANK 0x1000
+#define SKY1_IP_CFG_CTRL_REG_BANK 0x4c00
+#define SKY1_IP_AXI_MASTER_COMMON 0xf000
+#define SKY1_AXI_SLAVE 0x9000
+#define SKY1_AXI_MASTER 0xb000
+#define SKY1_AXI_HLS_REGISTERS 0xc000
+#define SKY1_AXI_RAS_REGISTERS 0xe000
+#define SKY1_DTI_REGISTERS 0xd000
+
+#define IP_REG_I_DBG_STS_0 0x420
+
+struct sky1_pcie {
+ struct cdns_pcie *cdns_pcie;
+ struct cdns_pcie_rc *cdns_pcie_rc;
+
+ struct resource *cfg_res;
+ struct resource *msg_res;
+ struct pci_config_window *cfg;
+ void __iomem *strap_base;
+ void __iomem *status_base;
+ void __iomem *reg_base;
+ void __iomem *cfg_base;
+ void __iomem *msg_base;
+};
+
+static int sky1_pcie_resource_get(struct platform_device *pdev,
+ struct sky1_pcie *pcie)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ void __iomem *base;
+
+ base = devm_platform_ioremap_resource_byname(pdev, "reg");
+ if (IS_ERR(base))
+ return dev_err_probe(dev, PTR_ERR(base),
+ "unable to find \"reg\" registers\n");
+ pcie->reg_base = base;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
+ if (!res)
+ return dev_err_probe(dev, -ENODEV, "unable to get \"cfg\" resource\n");
+ pcie->cfg_res = res;
+
+ base = devm_platform_ioremap_resource_byname(pdev, "rcsu_strap");
+ if (IS_ERR(base))
+ return dev_err_probe(dev, PTR_ERR(base),
+ "unable to find \"rcsu_strap\" registers\n");
+ pcie->strap_base = base;
+
+ base = devm_platform_ioremap_resource_byname(pdev, "rcsu_status");
+ if (IS_ERR(base))
+ return dev_err_probe(dev, PTR_ERR(base),
+ "unable to find \"rcsu_status\" registers\n");
+ pcie->status_base = base;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "msg");
+ if (!res)
+ return dev_err_probe(dev, -ENODEV, "unable to get \"msg\" resource\n");
+ pcie->msg_res = res;
+ pcie->msg_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pcie->msg_base)) {
+ return dev_err_probe(dev, PTR_ERR(pcie->msg_base),
+ "unable to ioremap msg resource\n");
+ }
+
+ return 0;
+}
+
+static int sky1_pcie_start_link(struct cdns_pcie *cdns_pcie)
+{
+ struct sky1_pcie *pcie = dev_get_drvdata(cdns_pcie->dev);
+ u32 val;
+
+ val = readl(pcie->strap_base + STRAP_REG(1));
+ val |= LINK_TRAINING_ENABLE;
+ writel(val, pcie->strap_base + STRAP_REG(1));
+
+ return 0;
+}
+
+static void sky1_pcie_stop_link(struct cdns_pcie *cdns_pcie)
+{
+ struct sky1_pcie *pcie = dev_get_drvdata(cdns_pcie->dev);
+ u32 val;
+
+ val = readl(pcie->strap_base + STRAP_REG(1));
+ val &= ~LINK_TRAINING_ENABLE;
+ writel(val, pcie->strap_base + STRAP_REG(1));
+}
+
+static bool sky1_pcie_link_up(struct cdns_pcie *cdns_pcie)
+{
+ u32 val;
+
+ val = cdns_pcie_hpa_readl(cdns_pcie, REG_BANK_IP_REG,
+ IP_REG_I_DBG_STS_0);
+ return val & LINK_COMPLETE;
+}
+
+static const struct cdns_pcie_ops sky1_pcie_ops = {
+ .start_link = sky1_pcie_start_link,
+ .stop_link = sky1_pcie_stop_link,
+ .link_up = sky1_pcie_link_up,
+};
+
+static int sky1_pcie_probe(struct platform_device *pdev)
+{
+ struct cdns_plat_pcie_of_data *reg_off;
+ struct device *dev = &pdev->dev;
+ struct pci_host_bridge *bridge;
+ struct cdns_pcie *cdns_pcie;
+ struct resource_entry *bus;
+ struct cdns_pcie_rc *rc;
+ struct sky1_pcie *pcie;
+ int ret;
+
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rc));
+ if (!bridge)
+ return -ENOMEM;
+
+ ret = sky1_pcie_resource_get(pdev, pcie);
+ if (ret < 0)
+ return ret;
+
+ bus = resource_list_first_type(&bridge->windows, IORESOURCE_BUS);
+ if (!bus)
+ return -ENODEV;
+
+ pcie->cfg = pci_ecam_create(dev, pcie->cfg_res, bus->res,
+ &pci_generic_ecam_ops);
+ if (IS_ERR(pcie->cfg))
+ return PTR_ERR(pcie->cfg);
+
+ bridge->ops = (struct pci_ops *)&pci_generic_ecam_ops.pci_ops;
+ rc = pci_host_bridge_priv(bridge);
+ rc->ecam_supported = 1;
+ rc->cfg_base = pcie->cfg->win;
+ rc->cfg_res = &pcie->cfg->res;
+
+ cdns_pcie = &rc->pcie;
+ cdns_pcie->dev = dev;
+ cdns_pcie->ops = &sky1_pcie_ops;
+ cdns_pcie->reg_base = pcie->reg_base;
+ cdns_pcie->msg_res = pcie->msg_res;
+ cdns_pcie->is_rc = 1;
+
+ reg_off = devm_kzalloc(dev, sizeof(*reg_off), GFP_KERNEL);
+ if (!reg_off)
+ return -ENOMEM;
+
+ reg_off->ip_reg_bank_offset = SKY1_IP_REG_BANK;
+ reg_off->ip_cfg_ctrl_reg_offset = SKY1_IP_CFG_CTRL_REG_BANK;
+ reg_off->axi_mstr_common_offset = SKY1_IP_AXI_MASTER_COMMON;
+ reg_off->axi_slave_offset = SKY1_AXI_SLAVE;
+ reg_off->axi_master_offset = SKY1_AXI_MASTER;
+ reg_off->axi_hls_offset = SKY1_AXI_HLS_REGISTERS;
+ reg_off->axi_ras_offset = SKY1_AXI_RAS_REGISTERS;
+ reg_off->axi_dti_offset = SKY1_DTI_REGISTERS;
+ cdns_pcie->cdns_pcie_reg_offsets = reg_off;
+
+ pcie->cdns_pcie = cdns_pcie;
+ pcie->cdns_pcie_rc = rc;
+ pcie->cfg_base = rc->cfg_base;
+ bridge->sysdata = pcie->cfg;
+
+ rc->vendor_id = PCI_VENDOR_ID_CIX;
+ rc->device_id = PCI_DEVICE_ID_CIX_SKY1;
+ rc->no_inbound_map = 1;
+
+ dev_set_drvdata(dev, pcie);
+
+ ret = cdns_pcie_hpa_host_setup(rc);
+ if (ret < 0) {
+ pci_ecam_free(pcie->cfg);
+ return ret;
+ }
+
+ return 0;
+}
+
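For context on what pci_ecam_create() sets up above: with rc->ecam_supported, config accesses go through a flat ECAM window in which each function owns a 4 KB slice addressed by bus/device/function. A minimal sketch of the standard ECAM offset math (my illustration of the layout defined by the PCIe spec, not code from this driver):

#include <stdint.h>
#include <stdio.h>

/* ECAM: offset = bus << 20 | device << 15 | function << 12 | register */
static uint64_t ecam_offset(uint8_t bus, uint8_t dev, uint8_t fn, uint16_t reg)
{
	return ((uint64_t)bus << 20) | ((uint64_t)(dev & 0x1f) << 15) |
	       ((uint64_t)(fn & 0x7) << 12) | (reg & 0xfff);
}

int main(void)
{
	/* Vendor ID (register 0) of bus 1, device 0, function 0 */
	printf("offset into cfg window: 0x%llx\n",
	       (unsigned long long)ecam_offset(1, 0, 0, 0));	/* 0x100000 */
	return 0;
}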
+static const struct of_device_id of_sky1_pcie_match[] = {
+ { .compatible = "cix,sky1-pcie-host", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_sky1_pcie_match);
+
+static void sky1_pcie_remove(struct platform_device *pdev)
+{
+ struct sky1_pcie *pcie = platform_get_drvdata(pdev);
+
+ pci_ecam_free(pcie->cfg);
+}
+
+static struct platform_driver sky1_pcie_driver = {
+ .probe = sky1_pcie_probe,
+ .remove = sky1_pcie_remove,
+ .driver = {
+ .name = "sky1-pcie",
+ .of_match_table = of_sky1_pcie_match,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+};
+module_platform_driver(sky1_pcie_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("PCIe controller driver for CIX's sky1 SoCs");
+MODULE_AUTHOR("Hans Zhang <hans.zhang@cixtech.com>");
diff --git a/drivers/pci/controller/cadence/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c
index b8b655d4047e..c0e1194a936b 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-ep.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c
@@ -3,14 +3,17 @@
// Cadence PCIe endpoint controller driver.
// Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>
+#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/pci-epc.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include "pcie-cadence.h"
+#include "../../pci.h"
#define CDNS_PCIE_EP_MIN_APERTURE 128 /* 128 bytes */
#define CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE 0x1
@@ -18,12 +21,13 @@
static u8 cdns_pcie_get_fn_from_vfn(struct cdns_pcie *pcie, u8 fn, u8 vfn)
{
- u32 cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET;
u32 first_vf_offset, stride;
+ u16 cap;
if (vfn == 0)
return fn;
+ cap = cdns_pcie_find_ext_capability(pcie, PCI_EXT_CAP_ID_SRIOV);
first_vf_offset = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_SRIOV_VF_OFFSET);
stride = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_SRIOV_VF_STRIDE);
fn = fn + first_vf_offset + ((vfn - 1) * stride);
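The function-number arithmetic in cdns_pcie_get_fn_from_vfn() above follows the SR-IOV capability: VF n of a physical function is routed as PF + First VF Offset + (n - 1) * VF Stride. A standalone sketch with made-up offset/stride values (the driver reads the real ones from config space):

#include <stdint.h>
#include <stdio.h>

static uint8_t vf_fn(uint8_t pf, uint8_t first_vf_offset, uint8_t stride,
		     uint8_t vfn)
{
	return pf + first_vf_offset + (vfn - 1) * stride;
}

int main(void)
{
	/* e.g. First VF Offset = 4, VF Stride = 1 (hypothetical values) */
	printf("VF1 of PF0 -> function %u\n", vf_fn(0, 4, 1, 1));	/* 4 */
	printf("VF2 of PF0 -> function %u\n", vf_fn(0, 4, 1, 2));	/* 5 */
	return 0;
}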
@@ -35,10 +39,11 @@ static int cdns_pcie_ep_write_header(struct pci_epc *epc, u8 fn, u8 vfn,
struct pci_epf_header *hdr)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
- u32 cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET;
struct cdns_pcie *pcie = &ep->pcie;
u32 reg;
+ u16 cap;
+ cap = cdns_pcie_find_ext_capability(pcie, PCI_EXT_CAP_ID_SRIOV);
if (vfn > 1) {
dev_err(&epc->dev, "Only Virtual Function #1 has deviceID\n");
return -EINVAL;
@@ -98,14 +103,11 @@ static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, u8 vfn,
ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS;
} else {
bool is_prefetch = !!(flags & PCI_BASE_ADDRESS_MEM_PREFETCH);
- bool is_64bits = sz > SZ_2G;
+ bool is_64bits = !!(flags & PCI_BASE_ADDRESS_MEM_TYPE_64);
if (is_64bits && (bar & 1))
return -EINVAL;
- if (is_64bits && !(flags & PCI_BASE_ADDRESS_MEM_TYPE_64))
- epf_bar->flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;
-
if (is_64bits && is_prefetch)
ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS;
else if (is_prefetch)
@@ -222,13 +224,15 @@ static void cdns_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, u8 vfn,
clear_bit(r, &ep->ob_region_map);
}
-static int cdns_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn, u8 mmc)
+static int cdns_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn, u8 nr_irqs)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
- u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
+ u8 mmc = order_base_2(nr_irqs);
u16 flags;
+ u8 cap;
+ cap = cdns_pcie_find_capability(pcie, PCI_CAP_ID_MSI);
fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
/*
@@ -248,9 +252,10 @@ static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
- u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
u16 flags, mme;
+ u8 cap;
+ cap = cdns_pcie_find_capability(pcie, PCI_CAP_ID_MSI);
fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
/* Validate that the MSI feature is actually enabled. */
@@ -262,18 +267,19 @@ static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn)
* Get the Multiple Message Enable bitfield from the Message Control
* register.
*/
- mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4;
+ mme = FIELD_GET(PCI_MSI_FLAGS_QSIZE, flags);
- return mme;
+ return 1 << mme;
}
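The change above matters because MSI's Multiple Message Enable field is a log2 encoding: an MME value of n means 2^n vectors are enabled, so get_msi must return 1 << mme rather than the raw field (and set_msi correspondingly stores order_base_2(nr_irqs)). A minimal decode sketch (standalone C, using the kernel's QSIZE field position):

#include <stdint.h>
#include <stdio.h>

#define PCI_MSI_FLAGS_QSIZE 0x0070	/* MME, bits 6:4 of Message Control */

static unsigned int msi_nr_irqs(uint16_t flags)
{
	unsigned int mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4;
	return 1u << mme;	/* MME is log2 of the enabled vector count */
}

int main(void)
{
	uint16_t flags = 3 << 4;	/* MME = 3 */
	printf("enabled MSI vectors: %u\n", msi_nr_irqs(flags));	/* 8 */
	return 0;
}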
static int cdns_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
- u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
u32 val, reg;
+ u8 cap;
+ cap = cdns_pcie_find_capability(pcie, PCI_CAP_ID_MSIX);
func_no = cdns_pcie_get_fn_from_vfn(pcie, func_no, vfunc_no);
reg = cap + PCI_MSIX_FLAGS;
@@ -283,34 +289,34 @@ static int cdns_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
val &= PCI_MSIX_FLAGS_QSIZE;
- return val;
+ return val + 1;
}
static int cdns_pcie_ep_set_msix(struct pci_epc *epc, u8 fn, u8 vfn,
- u16 interrupts, enum pci_barno bir,
- u32 offset)
+ u16 nr_irqs, enum pci_barno bir, u32 offset)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
- u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
u32 val, reg;
+ u8 cap;
+ cap = cdns_pcie_find_capability(pcie, PCI_CAP_ID_MSIX);
fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
reg = cap + PCI_MSIX_FLAGS;
val = cdns_pcie_ep_fn_readw(pcie, fn, reg);
val &= ~PCI_MSIX_FLAGS_QSIZE;
- val |= interrupts;
+ val |= nr_irqs - 1; /* encoded as N-1 */
cdns_pcie_ep_fn_writew(pcie, fn, reg, val);
- /* Set MSIX BAR and offset */
+ /* Set MSI-X BAR and offset */
reg = cap + PCI_MSIX_TABLE;
val = offset | bir;
cdns_pcie_ep_fn_writel(pcie, fn, reg, val);
- /* Set PBA BAR and offset. BAR must match MSIX BAR */
+ /* Set PBA BAR and offset. BAR must match MSI-X BAR */
reg = cap + PCI_MSIX_PBA;
- val = (offset + (interrupts * PCI_MSIX_ENTRY_SIZE)) | bir;
+ val = (offset + (nr_irqs * PCI_MSIX_ENTRY_SIZE)) | bir;
cdns_pcie_ep_fn_writel(pcie, fn, reg, val);
return 0;
@@ -339,10 +345,10 @@ static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn, u8 intx,
if (is_asserted) {
ep->irq_pending |= BIT(intx);
- msg_code = MSG_CODE_ASSERT_INTA + intx;
+ msg_code = PCIE_MSG_CODE_ASSERT_INTA + intx;
} else {
ep->irq_pending &= ~BIT(intx);
- msg_code = MSG_CODE_DEASSERT_INTA + intx;
+ msg_code = PCIE_MSG_CODE_DEASSERT_INTA + intx;
}
spin_lock_irqsave(&ep->lock, flags);
@@ -353,14 +359,13 @@ static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn, u8 intx,
}
spin_unlock_irqrestore(&ep->lock, flags);
- offset = CDNS_PCIE_NORMAL_MSG_ROUTING(MSG_ROUTING_LOCAL) |
- CDNS_PCIE_NORMAL_MSG_CODE(msg_code) |
- CDNS_PCIE_MSG_NO_DATA;
+ offset = CDNS_PCIE_NORMAL_MSG_ROUTING(PCIE_MSG_TYPE_R_LOCAL) |
+ CDNS_PCIE_NORMAL_MSG_CODE(msg_code);
writel(0, ep->irq_cpu_addr + offset);
}
-static int cdns_pcie_ep_send_legacy_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
- u8 intx)
+static int cdns_pcie_ep_send_intx_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
+ u8 intx)
{
u16 cmd;
@@ -370,7 +375,7 @@ static int cdns_pcie_ep_send_legacy_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
cdns_pcie_ep_assert_intx(ep, fn, intx, true);
/*
- * The mdelay() value was taken from dra7xx_pcie_raise_legacy_irq()
+ * The mdelay() value was taken from dra7xx_pcie_raise_intx_irq()
*/
mdelay(1);
cdns_pcie_ep_assert_intx(ep, fn, intx, false);
@@ -381,11 +386,11 @@ static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
u8 interrupt_num)
{
struct cdns_pcie *pcie = &ep->pcie;
- u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
u16 flags, mme, data, data_mask;
- u8 msi_count;
u64 pci_addr, pci_addr_mask = 0xff;
+ u8 msi_count, cap;
+ cap = cdns_pcie_find_capability(pcie, PCI_CAP_ID_MSI);
fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
/* Check whether the MSI feature has been enabled by the PCI host. */
@@ -394,7 +399,7 @@ static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
return -EINVAL;
/* Get the number of enabled MSIs */
- mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4;
+ mme = FIELD_GET(PCI_MSI_FLAGS_QSIZE, flags);
msi_count = 1 << mme;
if (!interrupt_num || interrupt_num > msi_count)
return -EINVAL;
@@ -433,14 +438,14 @@ static int cdns_pcie_ep_map_msi_irq(struct pci_epc *epc, u8 fn, u8 vfn,
u32 *msi_addr_offset)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
- u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
struct cdns_pcie *pcie = &ep->pcie;
u64 pci_addr, pci_addr_mask = 0xff;
u16 flags, mme, data, data_mask;
- u8 msi_count;
+ u8 msi_count, cap;
int ret;
int i;
+ cap = cdns_pcie_find_capability(pcie, PCI_CAP_ID_MSI);
fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
/* Check whether the MSI feature has been enabled by the PCI host. */
@@ -449,7 +454,7 @@ static int cdns_pcie_ep_map_msi_irq(struct pci_epc *epc, u8 fn, u8 vfn,
return -EINVAL;
/* Get the number of enabled MSIs */
- mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4;
+ mme = FIELD_GET(PCI_MSI_FLAGS_QSIZE, flags);
msi_count = 1 << mme;
if (!interrupt_num || interrupt_num > msi_count)
return -EINVAL;
@@ -483,16 +488,16 @@ static int cdns_pcie_ep_map_msi_irq(struct pci_epc *epc, u8 fn, u8 vfn,
static int cdns_pcie_ep_send_msix_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
u16 interrupt_num)
{
- u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
u32 tbl_offset, msg_data, reg;
struct cdns_pcie *pcie = &ep->pcie;
struct pci_epf_msix_tbl *msix_tbl;
struct cdns_pcie_epf *epf;
u64 pci_addr_mask = 0xff;
u64 msg_addr;
+ u8 bir, cap;
u16 flags;
- u8 bir;
+ cap = cdns_pcie_find_capability(pcie, PCI_CAP_ID_MSIX);
epf = &ep->epf[fn];
if (vfn > 0)
epf = &epf->epf[vfn - 1];
@@ -506,7 +511,7 @@ static int cdns_pcie_ep_send_msix_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
reg = cap + PCI_MSIX_TABLE;
tbl_offset = cdns_pcie_ep_fn_readl(pcie, fn, reg);
- bir = tbl_offset & PCI_MSIX_TABLE_BIR;
+ bir = FIELD_GET(PCI_MSIX_TABLE_BIR, tbl_offset);
tbl_offset &= PCI_MSIX_TABLE_OFFSET;
msix_tbl = epf->epf_bar[bir]->addr + tbl_offset;
@@ -531,25 +536,24 @@ static int cdns_pcie_ep_send_msix_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
}
static int cdns_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, u8 vfn,
- enum pci_epc_irq_type type,
- u16 interrupt_num)
+ unsigned int type, u16 interrupt_num)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
struct device *dev = pcie->dev;
switch (type) {
- case PCI_EPC_IRQ_LEGACY:
+ case PCI_IRQ_INTX:
if (vfn > 0) {
- dev_err(dev, "Cannot raise legacy interrupts for VF\n");
+ dev_err(dev, "Cannot raise INTx interrupts for VF\n");
return -EINVAL;
}
- return cdns_pcie_ep_send_legacy_irq(ep, fn, vfn, 0);
+ return cdns_pcie_ep_send_intx_irq(ep, fn, vfn, 0);
- case PCI_EPC_IRQ_MSI:
+ case PCI_IRQ_MSI:
return cdns_pcie_ep_send_msi_irq(ep, fn, vfn, interrupt_num);
- case PCI_EPC_IRQ_MSIX:
+ case PCI_IRQ_MSIX:
return cdns_pcie_ep_send_msix_irq(ep, fn, vfn, interrupt_num);
default:
@@ -565,26 +569,38 @@ static int cdns_pcie_ep_start(struct pci_epc *epc)
struct cdns_pcie *pcie = &ep->pcie;
struct device *dev = pcie->dev;
int max_epfs = sizeof(epc->function_num_map) * 8;
- int ret, value, epf;
+ int ret, epf, last_fn;
+ u32 reg, value;
+ u8 cap;
+ cap = cdns_pcie_find_capability(pcie, PCI_CAP_ID_EXP);
/*
* BIT(0) is hardwired to 1, hence function 0 is always enabled
* and can't be disabled anyway.
*/
cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, epc->function_num_map);
+ /*
+ * The Next Function Number field in the ARI_CAP_AND_CTRL register
+ * of the last enabled function must be 0 to terminate the ARI
+ * capability chain, so clear it for the last function used.
+ */
+ last_fn = find_last_bit(&epc->function_num_map, BITS_PER_LONG);
+ reg = CDNS_PCIE_CORE_PF_I_ARI_CAP_AND_CTRL(last_fn);
+ value = cdns_pcie_readl(pcie, reg);
+ value &= ~CDNS_PCIE_ARI_CAP_NFN_MASK;
+ cdns_pcie_writel(pcie, reg, value);
+
if (ep->quirk_disable_flr) {
for (epf = 0; epf < max_epfs; epf++) {
if (!(epc->function_num_map & BIT(epf)))
continue;
value = cdns_pcie_ep_fn_readl(pcie, epf,
- CDNS_PCIE_EP_FUNC_DEV_CAP_OFFSET +
- PCI_EXP_DEVCAP);
+ cap + PCI_EXP_DEVCAP);
value &= ~PCI_EXP_DEVCAP_FLR;
cdns_pcie_ep_fn_writel(pcie, epf,
- CDNS_PCIE_EP_FUNC_DEV_CAP_OFFSET +
- PCI_EXP_DEVCAP, value);
+ cap + PCI_EXP_DEVCAP, value);
}
}
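For reference, a standalone stand-in for the find_last_bit() lookup that the ARI termination above relies on; last_enabled_fn() is a made-up name for illustration and the GCC/Clang __builtin_clzl() builtin is assumed:

#include <stdio.h>

/* Illustrative equivalent of find_last_bit() for a nonzero map */
static int last_enabled_fn(unsigned long map)
{
	return (int)(8 * sizeof(map)) - 1 - __builtin_clzl(map);
}

int main(void)
{
	/* PF0, PF1 and PF3 enabled: the ARI chain must terminate at PF3 */
	printf("%d\n", last_enabled_fn(0x0bUL)); /* prints 3 */
	return 0;
}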
@@ -598,14 +614,12 @@ static int cdns_pcie_ep_start(struct pci_epc *epc)
}
static const struct pci_epc_features cdns_pcie_epc_vf_features = {
- .linkup_notifier = false,
.msi_capable = true,
.msix_capable = true,
.align = 65536,
};
static const struct pci_epc_features cdns_pcie_epc_features = {
- .linkup_notifier = false,
.msi_capable = true,
.msix_capable = true,
.align = 256,
@@ -636,6 +650,17 @@ static const struct pci_epc_ops cdns_pcie_epc_ops = {
.get_features = cdns_pcie_ep_get_features,
};
+void cdns_pcie_ep_disable(struct cdns_pcie_ep *ep)
+{
+ struct device *dev = ep->pcie.dev;
+ struct pci_epc *epc = to_pci_epc(dev);
+
+ pci_epc_deinit_notify(epc);
+ pci_epc_mem_free_addr(epc, ep->irq_phys_addr, ep->irq_cpu_addr,
+ SZ_128K);
+ pci_epc_mem_exit(epc);
+}
+EXPORT_SYMBOL_GPL(cdns_pcie_ep_disable);
int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
{
@@ -734,6 +759,8 @@ int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
spin_lock_init(&ep->lock);
+ pci_epc_init_notify(epc);
+
return 0;
free_epc_mem:
@@ -741,3 +768,8 @@ int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
return ret;
}
+EXPORT_SYMBOL_GPL(cdns_pcie_ep_setup);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Cadence PCIe endpoint controller driver");
+MODULE_AUTHOR("Cyrille Pitchen <cyrille.pitchen@free-electrons.com>");
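The set_msi()/get_msi() changes above switch the EPC interface to plain vector counts while the hardware field stays log2-encoded. A standalone round-trip of that encoding, assuming only the standard Message Control QSIZE layout (bits 6:4); the while loop stands in for order_base_2():

#include <stdio.h>

#define PCI_MSI_FLAGS_QSIZE 0x0070 /* log2 of enabled vectors, bits 6:4 */

int main(void)
{
	unsigned int nr_irqs = 8, mme = 0;

	while ((1u << mme) < nr_irqs) /* order_base_2(nr_irqs) */
		mme++;

	unsigned int flags = (mme << 4) & PCI_MSI_FLAGS_QSIZE; /* set_msi() */
	unsigned int decoded = 1u << ((flags & PCI_MSI_FLAGS_QSIZE) >> 4); /* get_msi() */

	printf("nr_irqs=%u mme=%u decoded=%u\n", nr_irqs, mme, decoded); /* 8 3 8 */
	return 0;
}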
diff --git a/drivers/pci/controller/cadence/pcie-cadence-host-common.c b/drivers/pci/controller/cadence/pcie-cadence-host-common.c
new file mode 100644
index 000000000000..15415d7f35ee
--- /dev/null
+++ b/drivers/pci/controller/cadence/pcie-cadence-host-common.c
@@ -0,0 +1,288 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cadence PCIe host controller library.
+ *
+ * Copyright (c) 2017 Cadence
+ * Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>
+ */
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/list_sort.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/platform_device.h>
+
+#include "pcie-cadence.h"
+#include "pcie-cadence-host-common.h"
+
+#define LINK_RETRAIN_TIMEOUT HZ
+
+u64 bar_max_size[] = {
+ [RP_BAR0] = _ULL(128 * SZ_2G),
+ [RP_BAR1] = SZ_2G,
+ [RP_NO_BAR] = _BITULL(63),
+};
+EXPORT_SYMBOL_GPL(bar_max_size);
+
+int cdns_pcie_host_training_complete(struct cdns_pcie *pcie)
+{
+ u32 pcie_cap_off = CDNS_PCIE_RP_CAP_OFFSET;
+ unsigned long end_jiffies;
+ u16 lnk_stat;
+
+ /* Wait for link training to complete. Exit after timeout. */
+ end_jiffies = jiffies + LINK_RETRAIN_TIMEOUT;
+ do {
+ lnk_stat = cdns_pcie_rp_readw(pcie, pcie_cap_off + PCI_EXP_LNKSTA);
+ if (!(lnk_stat & PCI_EXP_LNKSTA_LT))
+ break;
+ usleep_range(0, 1000);
+ } while (time_before(jiffies, end_jiffies));
+
+ if (!(lnk_stat & PCI_EXP_LNKSTA_LT))
+ return 0;
+
+ return -ETIMEDOUT;
+}
+EXPORT_SYMBOL_GPL(cdns_pcie_host_training_complete);
+
+int cdns_pcie_host_wait_for_link(struct cdns_pcie *pcie,
+ cdns_pcie_linkup_func pcie_link_up)
+{
+ struct device *dev = pcie->dev;
+ int retries;
+
+ /* Check if the link is up or not */
+ for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
+ if (pcie_link_up(pcie)) {
+ dev_info(dev, "Link up\n");
+ return 0;
+ }
+ usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
+ }
+
+ return -ETIMEDOUT;
+}
+EXPORT_SYMBOL_GPL(cdns_pcie_host_wait_for_link);
+
+int cdns_pcie_retrain(struct cdns_pcie *pcie,
+ cdns_pcie_linkup_func pcie_link_up)
+{
+ u32 lnk_cap_sls, pcie_cap_off = CDNS_PCIE_RP_CAP_OFFSET;
+ u16 lnk_stat, lnk_ctl;
+ int ret = 0;
+
+ /*
+ * Set the retrain bit if the current link speed is 2.5 GT/s but the
+ * PCIe root port supports more than 2.5 GT/s.
+ */
+
+ lnk_cap_sls = cdns_pcie_readl(pcie, (CDNS_PCIE_RP_BASE + pcie_cap_off +
+ PCI_EXP_LNKCAP));
+ if ((lnk_cap_sls & PCI_EXP_LNKCAP_SLS) <= PCI_EXP_LNKCAP_SLS_2_5GB)
+ return ret;
+
+ lnk_stat = cdns_pcie_rp_readw(pcie, pcie_cap_off + PCI_EXP_LNKSTA);
+ if ((lnk_stat & PCI_EXP_LNKSTA_CLS) == PCI_EXP_LNKSTA_CLS_2_5GB) {
+ lnk_ctl = cdns_pcie_rp_readw(pcie,
+ pcie_cap_off + PCI_EXP_LNKCTL);
+ lnk_ctl |= PCI_EXP_LNKCTL_RL;
+ cdns_pcie_rp_writew(pcie, pcie_cap_off + PCI_EXP_LNKCTL,
+ lnk_ctl);
+
+ ret = cdns_pcie_host_training_complete(pcie);
+ if (ret)
+ return ret;
+
+ ret = cdns_pcie_host_wait_for_link(pcie, pcie_link_up);
+ }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cdns_pcie_retrain);
+
+int cdns_pcie_host_start_link(struct cdns_pcie_rc *rc,
+ cdns_pcie_linkup_func pcie_link_up)
+{
+ struct cdns_pcie *pcie = &rc->pcie;
+ int ret;
+
+ ret = cdns_pcie_host_wait_for_link(pcie, pcie_link_up);
+
+ /*
+ * Retrain the link to work around a Gen2 training defect when the
+ * quirk flag is set.
+ */
+ if (!ret && rc->quirk_retrain_flag)
+ ret = cdns_pcie_retrain(pcie, pcie_link_up);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cdns_pcie_host_start_link);
+
+enum cdns_pcie_rp_bar
+cdns_pcie_host_find_min_bar(struct cdns_pcie_rc *rc, u64 size)
+{
+ enum cdns_pcie_rp_bar bar, sel_bar;
+
+ sel_bar = RP_BAR_UNDEFINED;
+ for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++) {
+ if (!rc->avail_ib_bar[bar])
+ continue;
+
+ if (size <= bar_max_size[bar]) {
+ if (sel_bar == RP_BAR_UNDEFINED) {
+ sel_bar = bar;
+ continue;
+ }
+
+ if (bar_max_size[bar] < bar_max_size[sel_bar])
+ sel_bar = bar;
+ }
+ }
+
+ return sel_bar;
+}
+EXPORT_SYMBOL_GPL(cdns_pcie_host_find_min_bar);
+
+enum cdns_pcie_rp_bar
+cdns_pcie_host_find_max_bar(struct cdns_pcie_rc *rc, u64 size)
+{
+ enum cdns_pcie_rp_bar bar, sel_bar;
+
+ sel_bar = RP_BAR_UNDEFINED;
+ for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++) {
+ if (!rc->avail_ib_bar[bar])
+ continue;
+
+ if (size >= bar_max_size[bar]) {
+ if (sel_bar == RP_BAR_UNDEFINED) {
+ sel_bar = bar;
+ continue;
+ }
+
+ if (bar_max_size[bar] > bar_max_size[sel_bar])
+ sel_bar = bar;
+ }
+ }
+
+ return sel_bar;
+}
+EXPORT_SYMBOL_GPL(cdns_pcie_host_find_max_bar);
+
+int cdns_pcie_host_dma_ranges_cmp(void *priv, const struct list_head *a,
+ const struct list_head *b)
+{
+ struct resource_entry *entry1, *entry2;
+
+ entry1 = container_of(a, struct resource_entry, node);
+ entry2 = container_of(b, struct resource_entry, node);
+
+ return resource_size(entry2->res) - resource_size(entry1->res);
+}
+EXPORT_SYMBOL_GPL(cdns_pcie_host_dma_ranges_cmp);
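The comparator above sorts bridge->dma_ranges largest-first so the biggest windows claim the tightest-fitting BARs before smaller entries fragment the pool. A standalone check of that ordering with made-up sizes; the three-way compare here avoids integer truncation, whereas the driver returns the raw size difference:

#include <stdio.h>
#include <stdlib.h>

static int cmp_desc(const void *a, const void *b)
{
	unsigned long long s1 = *(const unsigned long long *)a;
	unsigned long long s2 = *(const unsigned long long *)b;

	return (s2 > s1) - (s2 < s1); /* descending by size */
}

int main(void)
{
	unsigned long long sizes[] = { 0x400000, 0x4000000, 0x100000 };

	qsort(sizes, 3, sizeof(sizes[0]), cmp_desc);
	for (int i = 0; i < 3; i++)
		printf("%#llx\n", sizes[i]); /* 0x4000000, 0x400000, 0x100000 */
	return 0;
}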
+
+int cdns_pcie_host_bar_config(struct cdns_pcie_rc *rc,
+ struct resource_entry *entry,
+ cdns_pcie_host_bar_ib_cfg pci_host_ib_config)
+{
+ struct cdns_pcie *pcie = &rc->pcie;
+ struct device *dev = pcie->dev;
+ u64 cpu_addr, size, winsize;
+ enum cdns_pcie_rp_bar bar;
+ unsigned long flags;
+ int ret;
+
+ cpu_addr = entry->res->start;
+ flags = entry->res->flags;
+ size = resource_size(entry->res);
+
+ while (size > 0) {
+ /*
+ * Try to find a minimum BAR whose size is greater than
+ * or equal to the remaining resource_entry size. This will
+ * fail if the size of each of the available BARs is less than
+ * the remaining resource_entry size.
+ *
+ * If a minimum BAR is found, the IB ATU is configured for it and
+ * the function returns.
+ */
+ bar = cdns_pcie_host_find_min_bar(rc, size);
+ if (bar != RP_BAR_UNDEFINED) {
+ ret = pci_host_ib_config(rc, bar, cpu_addr, size, flags);
+ if (ret)
+ dev_err(dev, "IB BAR: %d config failed\n", bar);
+ return ret;
+ }
+
+ /*
+ * If the control reaches here, it would mean the remaining
+ * resource_entry size cannot be fitted in a single BAR. So we
+ * find a maximum BAR whose size is less than or equal to the
+ * remaining resource_entry size and split the resource entry
+ * so that part of resource entry is fitted inside the maximum
+ * BAR. The remaining size would be fitted during the next
+ * iteration of the loop.
+ *
+ * If a maximum BAR is not found, there is no way we can fit
+ * this resource_entry, so we error out.
+ */
+ bar = cdns_pcie_host_find_max_bar(rc, size);
+ if (bar == RP_BAR_UNDEFINED) {
+ dev_err(dev, "No free BAR to map cpu_addr %llx\n",
+ cpu_addr);
+ return -EINVAL;
+ }
+
+ winsize = bar_max_size[bar];
+ ret = pci_host_ib_config(rc, bar, cpu_addr, winsize, flags);
+ if (ret) {
+ dev_err(dev, "IB BAR: %d config failed\n", bar);
+ return ret;
+ }
+
+ size -= winsize;
+ cpu_addr += winsize;
+ }
+
+ return 0;
+}
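To make the split strategy concrete, here is a standalone simulation of the loop above against the bar_max_size[] table, with a hypothetical 258 GiB dma-range and RP_NO_BAR assumed already claimed (all values illustrative):

#include <stdio.h>

int main(void)
{
	/* Capacities from bar_max_size[]: RP_BAR0 = 256 GiB, RP_BAR1 = 2 GiB */
	unsigned long long cap[2] = { 256ULL << 30, 2ULL << 30 };
	int avail[2] = { 1, 1 };
	unsigned long long size = 258ULL << 30; /* hypothetical dma-range */

	while (size) {
		int min = -1, max = -1, b;

		for (b = 0; b < 2; b++) {
			if (!avail[b])
				continue;
			if (size <= cap[b] && (min < 0 || cap[b] < cap[min]))
				min = b;
			if (size >= cap[b] && (max < 0 || cap[b] > cap[max]))
				max = b;
		}
		if (min >= 0) { /* whole remainder fits */
			printf("RP_BAR%d <- %llu GiB\n", min, size >> 30);
			break;
		}
		if (max < 0) { /* nothing can even take a chunk */
			printf("no fit\n");
			return 1;
		}
		printf("RP_BAR%d <- %llu GiB\n", max, cap[max] >> 30);
		size -= cap[max];
		avail[max] = 0;
	}
	return 0; /* prints: RP_BAR0 <- 256 GiB, then RP_BAR1 <- 2 GiB */
}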
+
+int cdns_pcie_host_map_dma_ranges(struct cdns_pcie_rc *rc,
+ cdns_pcie_host_bar_ib_cfg pci_host_ib_config)
+{
+ struct cdns_pcie *pcie = &rc->pcie;
+ struct device *dev = pcie->dev;
+ struct device_node *np = dev->of_node;
+ struct pci_host_bridge *bridge;
+ struct resource_entry *entry;
+ u32 no_bar_nbits = 32;
+ int err;
+
+ bridge = pci_host_bridge_from_priv(rc);
+ if (!bridge)
+ return -ENOMEM;
+
+ if (list_empty(&bridge->dma_ranges)) {
+ of_property_read_u32(np, "cdns,no-bar-match-nbits",
+ &no_bar_nbits);
+ err = pci_host_ib_config(rc, RP_NO_BAR, 0x0, (u64)1 << no_bar_nbits, 0);
+ if (err)
+ dev_err(dev, "IB BAR: %d config failed\n", RP_NO_BAR);
+ return err;
+ }
+
+ list_sort(NULL, &bridge->dma_ranges, cdns_pcie_host_dma_ranges_cmp);
+
+ resource_list_for_each_entry(entry, &bridge->dma_ranges) {
+ err = cdns_pcie_host_bar_config(rc, entry, pci_host_ib_config);
+ if (err) {
+ dev_err(dev, "Failed to configure IB using dma-ranges\n");
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Cadence PCIe host controller library");
diff --git a/drivers/pci/controller/cadence/pcie-cadence-host-common.h b/drivers/pci/controller/cadence/pcie-cadence-host-common.h
new file mode 100644
index 000000000000..fe7d4202a8b6
--- /dev/null
+++ b/drivers/pci/controller/cadence/pcie-cadence-host-common.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Cadence PCIe Host controller driver.
+ *
+ * Copyright (c) 2017 Cadence
+ * Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>
+ */
+#ifndef _PCIE_CADENCE_HOST_COMMON_H
+#define _PCIE_CADENCE_HOST_COMMON_H
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+
+extern u64 bar_max_size[];
+
+typedef int (*cdns_pcie_host_bar_ib_cfg)(struct cdns_pcie_rc *rc,
+					 enum cdns_pcie_rp_bar bar,
+					 u64 cpu_addr, u64 size,
+					 unsigned long flags);
+typedef bool (*cdns_pcie_linkup_func)(struct cdns_pcie *pcie);
+
+int cdns_pcie_host_training_complete(struct cdns_pcie *pcie);
+int cdns_pcie_host_wait_for_link(struct cdns_pcie *pcie,
+ cdns_pcie_linkup_func pcie_link_up);
+int cdns_pcie_retrain(struct cdns_pcie *pcie,
+		      cdns_pcie_linkup_func pcie_link_up);
+int cdns_pcie_host_start_link(struct cdns_pcie_rc *rc,
+ cdns_pcie_linkup_func pcie_link_up);
+enum cdns_pcie_rp_bar
+cdns_pcie_host_find_min_bar(struct cdns_pcie_rc *rc, u64 size);
+enum cdns_pcie_rp_bar
+cdns_pcie_host_find_max_bar(struct cdns_pcie_rc *rc, u64 size);
+int cdns_pcie_host_dma_ranges_cmp(void *priv, const struct list_head *a,
+ const struct list_head *b);
+int cdns_pcie_host_bar_ib_config(struct cdns_pcie_rc *rc,
+ enum cdns_pcie_rp_bar bar,
+ u64 cpu_addr,
+ u64 size,
+ unsigned long flags);
+int cdns_pcie_host_bar_config(struct cdns_pcie_rc *rc,
+ struct resource_entry *entry,
+ cdns_pcie_host_bar_ib_cfg pci_host_ib_config);
+int cdns_pcie_host_map_dma_ranges(struct cdns_pcie_rc *rc,
+ cdns_pcie_host_bar_ib_cfg pci_host_ib_config);
+
+#endif /* _PCIE_CADENCE_HOST_COMMON_H */
diff --git a/drivers/pci/controller/cadence/pcie-cadence-host-hpa.c b/drivers/pci/controller/cadence/pcie-cadence-host-hpa.c
new file mode 100644
index 000000000000..0f540bed58e8
--- /dev/null
+++ b/drivers/pci/controller/cadence/pcie-cadence-host-hpa.c
@@ -0,0 +1,368 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cadence PCIe host controller driver.
+ *
+ * Copyright (c) 2024, Cadence Design Systems
+ * Author: Manikandan K Pillai <mpillai@cadence.com>
+ */
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/list_sort.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+
+#include "pcie-cadence.h"
+#include "pcie-cadence-host-common.h"
+
+static u8 bar_aperture_mask[] = {
+ [RP_BAR0] = 0x3F,
+ [RP_BAR1] = 0x3F,
+};
+
+void __iomem *cdns_pci_hpa_map_bus(struct pci_bus *bus, unsigned int devfn,
+ int where)
+{
+ struct pci_host_bridge *bridge = pci_find_host_bridge(bus);
+ struct cdns_pcie_rc *rc = pci_host_bridge_priv(bridge);
+ struct cdns_pcie *pcie = &rc->pcie;
+ unsigned int busn = bus->number;
+ u32 addr0, desc0, desc1, ctrl0;
+ u32 regval;
+
+ if (pci_is_root_bus(bus)) {
+ /*
+ * Only the root port (devfn == 0) is connected to this bus.
+ * All other PCI devices are behind some bridge hence on another
+ * All other PCI devices are behind some bridge, hence on another
+ * bus.
+ if (devfn)
+ return NULL;
+
+ return pcie->reg_base + (where & 0xfff);
+ }
+
+ /* Clear AXI link-down status */
+ regval = cdns_pcie_hpa_readl(pcie, REG_BANK_AXI_SLAVE, CDNS_PCIE_HPA_AT_LINKDOWN);
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE, CDNS_PCIE_HPA_AT_LINKDOWN,
+ regval & ~BIT(0));
+
+ /* Update Output registers for AXI region 0 */
+ addr0 = CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0_NBITS(12) |
+ CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) |
+ CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0_BUS(busn);
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0(0), addr0);
+
+ desc1 = cdns_pcie_hpa_readl(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_DESC1(0));
+ desc1 &= ~CDNS_PCIE_HPA_AT_OB_REGION_DESC1_DEVFN_MASK;
+ desc1 |= CDNS_PCIE_HPA_AT_OB_REGION_DESC1_DEVFN(0);
+ ctrl0 = CDNS_PCIE_HPA_AT_OB_REGION_CTRL0_SUPPLY_BUS |
+ CDNS_PCIE_HPA_AT_OB_REGION_CTRL0_SUPPLY_DEV_FN;
+
+ if (busn == bridge->busnr + 1)
+ desc0 = CDNS_PCIE_HPA_AT_OB_REGION_DESC0_TYPE_CONF_TYPE0;
+ else
+ desc0 = CDNS_PCIE_HPA_AT_OB_REGION_DESC0_TYPE_CONF_TYPE1;
+
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_DESC0(0), desc0);
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_DESC1(0), desc1);
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_CTRL0(0), ctrl0);
+
+ return rc->cfg_base + (where & 0xfff);
+}
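The Type 0 versus Type 1 selection above follows the usual PCI bridge rule: only the bus immediately behind the root port is addressed with Type 0 configuration requests, everything deeper gets Type 1. A minimal standalone restatement, with illustrative names:

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the routing decision in cdns_pci_hpa_map_bus() */
static bool cfg_is_type0(unsigned int busn, unsigned int bridge_busnr)
{
	return busn == bridge_busnr + 1; /* bus right behind the root port */
}

int main(void)
{
	printf("%d %d\n", cfg_is_type0(1, 0), cfg_is_type0(2, 0)); /* 1 0 */
	return 0;
}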
+
+static struct pci_ops cdns_pcie_hpa_host_ops = {
+ .map_bus = cdns_pci_hpa_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+};
+
+static void cdns_pcie_hpa_host_enable_ptm_response(struct cdns_pcie *pcie)
+{
+ u32 val;
+
+ val = cdns_pcie_hpa_readl(pcie, REG_BANK_IP_REG, CDNS_PCIE_HPA_LM_PTM_CTRL);
+ cdns_pcie_hpa_writel(pcie, REG_BANK_IP_REG, CDNS_PCIE_HPA_LM_PTM_CTRL,
+ val | CDNS_PCIE_HPA_LM_PTM_CTRL_PTMRSEN);
+}
+
+static int cdns_pcie_hpa_host_bar_ib_config(struct cdns_pcie_rc *rc,
+ enum cdns_pcie_rp_bar bar,
+ u64 cpu_addr, u64 size,
+ unsigned long flags)
+{
+ struct cdns_pcie *pcie = &rc->pcie;
+ u32 addr0, addr1, aperture, value;
+
+ if (!rc->avail_ib_bar[bar])
+ return -ENODEV;
+
+ rc->avail_ib_bar[bar] = false;
+
+ aperture = ilog2(size);
+ if (bar == RP_NO_BAR) {
+ addr0 = CDNS_PCIE_HPA_AT_IB_RP_BAR_ADDR0_NBITS(aperture) |
+ (lower_32_bits(cpu_addr) & GENMASK(31, 8));
+ addr1 = upper_32_bits(cpu_addr);
+ } else {
+ addr0 = lower_32_bits(cpu_addr);
+ addr1 = upper_32_bits(cpu_addr);
+ }
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_MASTER,
+ CDNS_PCIE_HPA_AT_IB_RP_BAR_ADDR0(bar), addr0);
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_MASTER,
+ CDNS_PCIE_HPA_AT_IB_RP_BAR_ADDR1(bar), addr1);
+
+ if (bar == RP_NO_BAR)
+ bar = (enum cdns_pcie_rp_bar)BAR_0;
+
+ value = cdns_pcie_hpa_readl(pcie, REG_BANK_IP_CFG_CTRL_REG, CDNS_PCIE_HPA_LM_RC_BAR_CFG);
+ value &= ~(HPA_LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar) |
+ HPA_LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar) |
+ HPA_LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar) |
+ HPA_LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar) |
+ HPA_LM_RC_BAR_CFG_APERTURE(bar, bar_aperture_mask[bar] + 7));
+ if (size + cpu_addr >= SZ_4G) {
+ value |= HPA_LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar);
+ if ((flags & IORESOURCE_PREFETCH))
+ value |= HPA_LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar);
+ } else {
+ value |= HPA_LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar);
+ if ((flags & IORESOURCE_PREFETCH))
+ value |= HPA_LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar);
+ }
+
+ value |= HPA_LM_RC_BAR_CFG_APERTURE(bar, aperture);
+ cdns_pcie_hpa_writel(pcie, REG_BANK_IP_CFG_CTRL_REG, CDNS_PCIE_HPA_LM_RC_BAR_CFG, value);
+
+ return 0;
+}
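The APERTURE field stores ilog2(window size) minus 7, per HPA_LM_RC_BAR_CFG_APERTURE in pcie-cadence-hpa-regs.h, which is also why the clear mask above is built from bar_aperture_mask[bar] + 7. A quick numeric check for a hypothetical 1 MiB window on BAR0:

#include <stdio.h>

/* Plain-C copy of HPA_LM_RC_BAR_CFG_APERTURE() */
#define HPA_APERTURE(bar, ap) (((ap) - 7ULL) << (((bar) * 10) + 4))

int main(void)
{
	unsigned int aperture = 20; /* ilog2(1 MiB) */

	/* BAR0 field sits at bits [9:4]: (20 - 7) << 4 = 0xd0 */
	printf("%#llx\n", HPA_APERTURE(0, aperture));
	return 0;
}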
+
+static int cdns_pcie_hpa_host_init_root_port(struct cdns_pcie_rc *rc)
+{
+ struct cdns_pcie *pcie = &rc->pcie;
+ u32 value, ctrl;
+
+ /*
+ * Set the root port BAR configuration register:
+ * - disable both BAR0 and BAR1
+ * - enable Prefetchable Memory Base and Limit registers in type 1
+ * config space (64 bits)
+ * - enable IO Base and Limit registers in type 1 config
+ * space (32 bits)
+ */
+
+ ctrl = CDNS_PCIE_HPA_LM_BAR_CFG_CTRL_DISABLED;
+ value = CDNS_PCIE_HPA_LM_RC_BAR_CFG_BAR0_CTRL(ctrl) |
+ CDNS_PCIE_HPA_LM_RC_BAR_CFG_BAR1_CTRL(ctrl) |
+ CDNS_PCIE_HPA_LM_RC_BAR_CFG_PREFETCH_MEM_ENABLE |
+ CDNS_PCIE_HPA_LM_RC_BAR_CFG_PREFETCH_MEM_64BITS |
+ CDNS_PCIE_HPA_LM_RC_BAR_CFG_IO_ENABLE |
+ CDNS_PCIE_HPA_LM_RC_BAR_CFG_IO_32BITS;
+ cdns_pcie_hpa_writel(pcie, REG_BANK_IP_CFG_CTRL_REG,
+ CDNS_PCIE_HPA_LM_RC_BAR_CFG, value);
+
+ if (rc->vendor_id != 0xffff)
+ cdns_pcie_hpa_rp_writew(pcie, PCI_VENDOR_ID, rc->vendor_id);
+
+ if (rc->device_id != 0xffff)
+ cdns_pcie_hpa_rp_writew(pcie, PCI_DEVICE_ID, rc->device_id);
+
+ cdns_pcie_hpa_rp_writeb(pcie, PCI_CLASS_REVISION, 0);
+ cdns_pcie_hpa_rp_writeb(pcie, PCI_CLASS_PROG, 0);
+ cdns_pcie_hpa_rp_writew(pcie, PCI_CLASS_DEVICE, PCI_CLASS_BRIDGE_PCI);
+
+ /* Enable bus mastering */
+ value = cdns_pcie_hpa_readl(pcie, REG_BANK_RP, PCI_COMMAND);
+ value |= (PCI_COMMAND_MEMORY | PCI_COMMAND_IO | PCI_COMMAND_MASTER);
+ cdns_pcie_hpa_writel(pcie, REG_BANK_RP, PCI_COMMAND, value);
+ return 0;
+}
+
+static void cdns_pcie_hpa_create_region_for_cfg(struct cdns_pcie_rc *rc)
+{
+ struct cdns_pcie *pcie = &rc->pcie;
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(rc);
+ struct resource *cfg_res = rc->cfg_res;
+ struct resource_entry *entry;
+ u64 cpu_addr = cfg_res->start;
+ u32 addr0, addr1, desc1;
+ int busnr = 0;
+
+ entry = resource_list_first_type(&bridge->windows, IORESOURCE_BUS);
+ if (entry)
+ busnr = entry->res->start;
+
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_TAG_MANAGEMENT, 0x01000000);
+ /*
+ * Reserve region 0 for PCI configuration space accesses:
+ * OB_REGION_PCI_ADDR0 and OB_REGION_DESC0 are updated dynamically by
+ * cdns_pci_hpa_map_bus(); the other region registers are set here
+ * once for all
+ */
+ desc1 = CDNS_PCIE_HPA_AT_OB_REGION_DESC1_BUS(busnr);
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR1(0), 0x0);
+ /* Type-1 CFG */
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_DESC0(0), 0x05000000);
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_DESC1(0), desc1);
+
+ addr0 = CDNS_PCIE_HPA_AT_OB_REGION_CPU_ADDR0_NBITS(12) |
+ (lower_32_bits(cpu_addr) & GENMASK(31, 8));
+ addr1 = upper_32_bits(cpu_addr);
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_CPU_ADDR0(0), addr0);
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_CPU_ADDR1(0), addr1);
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_CTRL0(0), 0x06000000);
+}
+
+static int cdns_pcie_hpa_host_init_address_translation(struct cdns_pcie_rc *rc)
+{
+ struct cdns_pcie *pcie = &rc->pcie;
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(rc);
+ struct resource_entry *entry;
+ int r = 0, busnr = 0;
+
+ if (!rc->ecam_supported)
+ cdns_pcie_hpa_create_region_for_cfg(rc);
+
+ entry = resource_list_first_type(&bridge->windows, IORESOURCE_BUS);
+ if (entry)
+ busnr = entry->res->start;
+
+ r++;
+ if (pcie->msg_res) {
+ cdns_pcie_hpa_set_outbound_region_for_normal_msg(pcie, busnr, 0, r,
+ pcie->msg_res->start);
+
+ r++;
+ }
+ resource_list_for_each_entry(entry, &bridge->windows) {
+ struct resource *res = entry->res;
+ u64 pci_addr = res->start - entry->offset;
+
+ if (resource_type(res) == IORESOURCE_IO)
+ cdns_pcie_hpa_set_outbound_region(pcie, busnr, 0, r,
+ true,
+ pci_pio_to_address(res->start),
+ pci_addr,
+ resource_size(res));
+ else
+ cdns_pcie_hpa_set_outbound_region(pcie, busnr, 0, r,
+ false,
+ res->start,
+ pci_addr,
+ resource_size(res));
+
+ r++;
+ }
+
+ if (rc->no_inbound_map)
+ return 0;
+
+ return cdns_pcie_host_map_dma_ranges(rc, cdns_pcie_hpa_host_bar_ib_config);
+}
+
+static int cdns_pcie_hpa_host_init(struct cdns_pcie_rc *rc)
+{
+ int err;
+
+ err = cdns_pcie_hpa_host_init_root_port(rc);
+ if (err)
+ return err;
+
+ return cdns_pcie_hpa_host_init_address_translation(rc);
+}
+
+int cdns_pcie_hpa_host_link_setup(struct cdns_pcie_rc *rc)
+{
+ struct cdns_pcie *pcie = &rc->pcie;
+ struct device *dev = rc->pcie.dev;
+ int ret;
+
+ if (rc->quirk_detect_quiet_flag)
+ cdns_pcie_hpa_detect_quiet_min_delay_set(&rc->pcie);
+
+ cdns_pcie_hpa_host_enable_ptm_response(pcie);
+
+ ret = cdns_pcie_start_link(pcie);
+ if (ret) {
+ dev_err(dev, "Failed to start link\n");
+ return ret;
+ }
+
+ ret = cdns_pcie_host_wait_for_link(pcie, cdns_pcie_hpa_link_up);
+ if (ret)
+ dev_dbg(dev, "PCIe link never came up\n");
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cdns_pcie_hpa_host_link_setup);
+
+int cdns_pcie_hpa_host_setup(struct cdns_pcie_rc *rc)
+{
+ struct device *dev = rc->pcie.dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct pci_host_bridge *bridge;
+ enum cdns_pcie_rp_bar bar;
+ struct cdns_pcie *pcie;
+ struct resource *res;
+ int ret;
+
+ bridge = pci_host_bridge_from_priv(rc);
+ if (!bridge)
+ return -ENOMEM;
+
+ pcie = &rc->pcie;
+ pcie->is_rc = true;
+
+ if (!pcie->reg_base) {
+ pcie->reg_base = devm_platform_ioremap_resource_byname(pdev, "reg");
+ if (IS_ERR(pcie->reg_base)) {
+ dev_err(dev, "missing \"reg\"\n");
+ return PTR_ERR(pcie->reg_base);
+ }
+ }
+
+ /* ECAM config space is remapped at the glue layer */
+ if (!rc->cfg_base) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
+ rc->cfg_base = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(rc->cfg_base))
+ return PTR_ERR(rc->cfg_base);
+ rc->cfg_res = res;
+ }
+
+ /* Set the EROM BAR aperture to 0 */
+ cdns_pcie_hpa_writel(pcie, REG_BANK_IP_CFG_CTRL_REG, CDNS_PCIE_EROM, 0x0);
+
+ ret = cdns_pcie_hpa_host_link_setup(rc);
+ if (ret)
+ return ret;
+
+ for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++)
+ rc->avail_ib_bar[bar] = true;
+
+ ret = cdns_pcie_hpa_host_init(rc);
+ if (ret)
+ return ret;
+
+ if (!bridge->ops)
+ bridge->ops = &cdns_pcie_hpa_host_ops;
+
+ return pci_host_probe(bridge);
+}
+EXPORT_SYMBOL_GPL(cdns_pcie_hpa_host_setup);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Cadence PCIe host controller driver");
diff --git a/drivers/pci/controller/cadence/pcie-cadence-host.c b/drivers/pci/controller/cadence/pcie-cadence-host.c
index 940c7dd701d6..db3154c1eccb 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-host.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-host.c
@@ -5,18 +5,14 @@
#include <linux/delay.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/list_sort.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/platform_device.h>
#include "pcie-cadence.h"
-
-static u64 bar_max_size[] = {
- [RP_BAR0] = _ULL(128 * SZ_2G),
- [RP_BAR1] = SZ_2G,
- [RP_NO_BAR] = _BITULL(63),
-};
+#include "pcie-cadence-host-common.h"
static u8 bar_aperture_mask[] = {
[RP_BAR0] = 0x1F,
@@ -70,6 +66,7 @@ void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
return rc->cfg_base + (where & 0xfff);
}
+EXPORT_SYMBOL_GPL(cdns_pci_map_bus);
static struct pci_ops cdns_pcie_host_ops = {
.map_bus = cdns_pci_map_bus,
@@ -77,50 +74,12 @@ static struct pci_ops cdns_pcie_host_ops = {
.write = pci_generic_config_write,
};
-static int cdns_pcie_host_wait_for_link(struct cdns_pcie *pcie)
+static void cdns_pcie_host_disable_ptm_response(struct cdns_pcie *pcie)
{
- struct device *dev = pcie->dev;
- int retries;
-
- /* Check if the link is up or not */
- for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
- if (cdns_pcie_link_up(pcie)) {
- dev_info(dev, "Link up\n");
- return 0;
- }
- usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
- }
-
- return -ETIMEDOUT;
-}
-
-static int cdns_pcie_retrain(struct cdns_pcie *pcie)
-{
- u32 lnk_cap_sls, pcie_cap_off = CDNS_PCIE_RP_CAP_OFFSET;
- u16 lnk_stat, lnk_ctl;
- int ret = 0;
-
- /*
- * Set retrain bit if current speed is 2.5 GB/s,
- * but the PCIe root port support is > 2.5 GB/s.
- */
-
- lnk_cap_sls = cdns_pcie_readl(pcie, (CDNS_PCIE_RP_BASE + pcie_cap_off +
- PCI_EXP_LNKCAP));
- if ((lnk_cap_sls & PCI_EXP_LNKCAP_SLS) <= PCI_EXP_LNKCAP_SLS_2_5GB)
- return ret;
-
- lnk_stat = cdns_pcie_rp_readw(pcie, pcie_cap_off + PCI_EXP_LNKSTA);
- if ((lnk_stat & PCI_EXP_LNKSTA_CLS) == PCI_EXP_LNKSTA_CLS_2_5GB) {
- lnk_ctl = cdns_pcie_rp_readw(pcie,
- pcie_cap_off + PCI_EXP_LNKCTL);
- lnk_ctl |= PCI_EXP_LNKCTL_RL;
- cdns_pcie_rp_writew(pcie, pcie_cap_off + PCI_EXP_LNKCTL,
- lnk_ctl);
+ u32 val;
- ret = cdns_pcie_host_wait_for_link(pcie);
- }
- return ret;
+ val = cdns_pcie_readl(pcie, CDNS_PCIE_LM_PTM_CTRL);
+ cdns_pcie_writel(pcie, CDNS_PCIE_LM_PTM_CTRL, val & ~CDNS_PCIE_LM_TPM_CTRL_PTMRSEN);
}
static void cdns_pcie_host_enable_ptm_response(struct cdns_pcie *pcie)
@@ -131,21 +90,24 @@ static void cdns_pcie_host_enable_ptm_response(struct cdns_pcie *pcie)
cdns_pcie_writel(pcie, CDNS_PCIE_LM_PTM_CTRL, val | CDNS_PCIE_LM_TPM_CTRL_PTMRSEN);
}
-static int cdns_pcie_host_start_link(struct cdns_pcie_rc *rc)
+static void cdns_pcie_host_deinit_root_port(struct cdns_pcie_rc *rc)
{
struct cdns_pcie *pcie = &rc->pcie;
- int ret;
-
- ret = cdns_pcie_host_wait_for_link(pcie);
-
- /*
- * Retrain link for Gen2 training defect
- * if quirk flag is set.
- */
- if (!ret && rc->quirk_retrain_flag)
- ret = cdns_pcie_retrain(pcie);
+ u32 value, ctrl;
- return ret;
+ cdns_pcie_rp_writew(pcie, PCI_CLASS_DEVICE, 0xffff);
+ cdns_pcie_rp_writeb(pcie, PCI_CLASS_PROG, 0xff);
+ cdns_pcie_rp_writeb(pcie, PCI_CLASS_REVISION, 0xff);
+ cdns_pcie_writel(pcie, CDNS_PCIE_LM_ID, 0xffffffff);
+ cdns_pcie_rp_writew(pcie, PCI_DEVICE_ID, 0xffff);
+ ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED;
+ value = ~(CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL(ctrl) |
+ CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL(ctrl) |
+ CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_ENABLE |
+ CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_64BITS |
+ CDNS_PCIE_LM_RC_BAR_CFG_IO_ENABLE |
+ CDNS_PCIE_LM_RC_BAR_CFG_IO_32BITS);
+ cdns_pcie_writel(pcie, CDNS_PCIE_LM_RC_BAR_CFG, value);
}
static int cdns_pcie_host_init_root_port(struct cdns_pcie_rc *rc)
@@ -188,10 +150,11 @@ static int cdns_pcie_host_init_root_port(struct cdns_pcie_rc *rc)
return 0;
}
-static int cdns_pcie_host_bar_ib_config(struct cdns_pcie_rc *rc,
- enum cdns_pcie_rp_bar bar,
- u64 cpu_addr, u64 size,
- unsigned long flags)
+int cdns_pcie_host_bar_ib_config(struct cdns_pcie_rc *rc,
+ enum cdns_pcie_rp_bar bar,
+ u64 cpu_addr,
+ u64 size,
+ unsigned long flags)
{
struct cdns_pcie *pcie = &rc->pcie;
u32 addr0, addr1, aperture, value;
@@ -233,172 +196,53 @@ static int cdns_pcie_host_bar_ib_config(struct cdns_pcie_rc *rc,
return 0;
}
-static enum cdns_pcie_rp_bar
-cdns_pcie_host_find_min_bar(struct cdns_pcie_rc *rc, u64 size)
+static void cdns_pcie_host_unmap_dma_ranges(struct cdns_pcie_rc *rc)
{
- enum cdns_pcie_rp_bar bar, sel_bar;
+ struct cdns_pcie *pcie = &rc->pcie;
+ enum cdns_pcie_rp_bar bar;
+ u32 value;
- sel_bar = RP_BAR_UNDEFINED;
+ /* Reset inbound configuration for all BARs which were being used */
for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++) {
- if (!rc->avail_ib_bar[bar])
+ if (rc->avail_ib_bar[bar])
continue;
- if (size <= bar_max_size[bar]) {
- if (sel_bar == RP_BAR_UNDEFINED) {
- sel_bar = bar;
- continue;
- }
-
- if (bar_max_size[bar] < bar_max_size[sel_bar])
- sel_bar = bar;
- }
- }
-
- return sel_bar;
-}
+ cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR0(bar), 0);
+ cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR1(bar), 0);
-static enum cdns_pcie_rp_bar
-cdns_pcie_host_find_max_bar(struct cdns_pcie_rc *rc, u64 size)
-{
- enum cdns_pcie_rp_bar bar, sel_bar;
-
- sel_bar = RP_BAR_UNDEFINED;
- for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++) {
- if (!rc->avail_ib_bar[bar])
+ if (bar == RP_NO_BAR)
continue;
- if (size >= bar_max_size[bar]) {
- if (sel_bar == RP_BAR_UNDEFINED) {
- sel_bar = bar;
- continue;
- }
-
- if (bar_max_size[bar] > bar_max_size[sel_bar])
- sel_bar = bar;
- }
+ value = ~(LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar) |
+ LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar) |
+ LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar) |
+ LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar) |
+ LM_RC_BAR_CFG_APERTURE(bar, bar_aperture_mask[bar] + 2));
+ cdns_pcie_writel(pcie, CDNS_PCIE_LM_RC_BAR_CFG, value);
}
-
- return sel_bar;
}
-static int cdns_pcie_host_bar_config(struct cdns_pcie_rc *rc,
- struct resource_entry *entry)
+static void cdns_pcie_host_deinit_address_translation(struct cdns_pcie_rc *rc)
{
- u64 cpu_addr, pci_addr, size, winsize;
struct cdns_pcie *pcie = &rc->pcie;
- struct device *dev = pcie->dev;
- enum cdns_pcie_rp_bar bar;
- unsigned long flags;
- int ret;
-
- cpu_addr = entry->res->start;
- pci_addr = entry->res->start - entry->offset;
- flags = entry->res->flags;
- size = resource_size(entry->res);
-
- if (entry->offset) {
- dev_err(dev, "PCI addr: %llx must be equal to CPU addr: %llx\n",
- pci_addr, cpu_addr);
- return -EINVAL;
- }
-
- while (size > 0) {
- /*
- * Try to find a minimum BAR whose size is greater than
- * or equal to the remaining resource_entry size. This will
- * fail if the size of each of the available BARs is less than
- * the remaining resource_entry size.
- * If a minimum BAR is found, IB ATU will be configured and
- * exited.
- */
- bar = cdns_pcie_host_find_min_bar(rc, size);
- if (bar != RP_BAR_UNDEFINED) {
- ret = cdns_pcie_host_bar_ib_config(rc, bar, cpu_addr,
- size, flags);
- if (ret)
- dev_err(dev, "IB BAR: %d config failed\n", bar);
- return ret;
- }
-
- /*
- * If the control reaches here, it would mean the remaining
- * resource_entry size cannot be fitted in a single BAR. So we
- * find a maximum BAR whose size is less than or equal to the
- * remaining resource_entry size and split the resource entry
- * so that part of resource entry is fitted inside the maximum
- * BAR. The remaining size would be fitted during the next
- * iteration of the loop.
- * If a maximum BAR is not found, there is no way we can fit
- * this resource_entry, so we error out.
- */
- bar = cdns_pcie_host_find_max_bar(rc, size);
- if (bar == RP_BAR_UNDEFINED) {
- dev_err(dev, "No free BAR to map cpu_addr %llx\n",
- cpu_addr);
- return -EINVAL;
- }
-
- winsize = bar_max_size[bar];
- ret = cdns_pcie_host_bar_ib_config(rc, bar, cpu_addr, winsize,
- flags);
- if (ret) {
- dev_err(dev, "IB BAR: %d config failed\n", bar);
- return ret;
- }
-
- size -= winsize;
- cpu_addr += winsize;
- }
-
- return 0;
-}
-
-static int cdns_pcie_host_dma_ranges_cmp(void *priv, const struct list_head *a,
- const struct list_head *b)
-{
- struct resource_entry *entry1, *entry2;
-
- entry1 = container_of(a, struct resource_entry, node);
- entry2 = container_of(b, struct resource_entry, node);
-
- return resource_size(entry2->res) - resource_size(entry1->res);
-}
-
-static int cdns_pcie_host_map_dma_ranges(struct cdns_pcie_rc *rc)
-{
- struct cdns_pcie *pcie = &rc->pcie;
- struct device *dev = pcie->dev;
- struct device_node *np = dev->of_node;
- struct pci_host_bridge *bridge;
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(rc);
struct resource_entry *entry;
- u32 no_bar_nbits = 32;
- int err;
-
- bridge = pci_host_bridge_from_priv(rc);
- if (!bridge)
- return -ENOMEM;
+ int r;
- if (list_empty(&bridge->dma_ranges)) {
- of_property_read_u32(np, "cdns,no-bar-match-nbits",
- &no_bar_nbits);
- err = cdns_pcie_host_bar_ib_config(rc, RP_NO_BAR, 0x0,
- (u64)1 << no_bar_nbits, 0);
- if (err)
- dev_err(dev, "IB BAR: %d config failed\n", RP_NO_BAR);
- return err;
- }
+ cdns_pcie_host_unmap_dma_ranges(rc);
- list_sort(NULL, &bridge->dma_ranges, cdns_pcie_host_dma_ranges_cmp);
+ /*
+ * Reset outbound region 0 which was reserved for configuration space
+ * accesses.
+ */
+ cdns_pcie_reset_outbound_region(pcie, 0);
- resource_list_for_each_entry(entry, &bridge->dma_ranges) {
- err = cdns_pcie_host_bar_config(rc, entry);
- if (err) {
- dev_err(dev, "Fail to configure IB using dma-ranges\n");
- return err;
- }
+ /* Reset rest of the outbound regions */
+ r = 1;
+ resource_list_for_each_entry(entry, &bridge->windows) {
+ cdns_pcie_reset_outbound_region(pcie, r);
+ r++;
}
-
- return 0;
}
static int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc)
@@ -425,7 +269,7 @@ static int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc)
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(0), addr1);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(0), desc1);
- if (pcie->ops->cpu_addr_fixup)
+ if (pcie->ops && pcie->ops->cpu_addr_fixup)
cpu_addr = pcie->ops->cpu_addr_fixup(pcie, cpu_addr);
addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(12) |
@@ -455,11 +299,16 @@ static int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc)
r++;
}
- return cdns_pcie_host_map_dma_ranges(rc);
+ return cdns_pcie_host_map_dma_ranges(rc, cdns_pcie_host_bar_ib_config);
+}
+
+static void cdns_pcie_host_deinit(struct cdns_pcie_rc *rc)
+{
+ cdns_pcie_host_deinit_address_translation(rc);
+ cdns_pcie_host_deinit_root_port(rc);
}
-static int cdns_pcie_host_init(struct device *dev,
- struct cdns_pcie_rc *rc)
+int cdns_pcie_host_init(struct cdns_pcie_rc *rc)
{
int err;
@@ -469,6 +318,53 @@ static int cdns_pcie_host_init(struct device *dev,
return cdns_pcie_host_init_address_translation(rc);
}
+EXPORT_SYMBOL_GPL(cdns_pcie_host_init);
+
+static void cdns_pcie_host_link_disable(struct cdns_pcie_rc *rc)
+{
+ struct cdns_pcie *pcie = &rc->pcie;
+
+ cdns_pcie_stop_link(pcie);
+ cdns_pcie_host_disable_ptm_response(pcie);
+}
+
+int cdns_pcie_host_link_setup(struct cdns_pcie_rc *rc)
+{
+ struct cdns_pcie *pcie = &rc->pcie;
+ struct device *dev = rc->pcie.dev;
+ int ret;
+
+ if (rc->quirk_detect_quiet_flag)
+ cdns_pcie_detect_quiet_min_delay_set(&rc->pcie);
+
+ cdns_pcie_host_enable_ptm_response(pcie);
+
+ ret = cdns_pcie_start_link(pcie);
+ if (ret) {
+ dev_err(dev, "Failed to start link\n");
+ return ret;
+ }
+
+ ret = cdns_pcie_host_start_link(rc, cdns_pcie_link_up);
+ if (ret)
+ dev_dbg(dev, "PCIe link never came up\n");
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cdns_pcie_host_link_setup);
+
+void cdns_pcie_host_disable(struct cdns_pcie_rc *rc)
+{
+ struct pci_host_bridge *bridge;
+
+ bridge = pci_host_bridge_from_priv(rc);
+ pci_stop_root_bus(bridge->bus);
+ pci_remove_root_bus(bridge->bus);
+
+ cdns_pcie_host_deinit(rc);
+ cdns_pcie_host_link_disable(rc);
+}
+EXPORT_SYMBOL_GPL(cdns_pcie_host_disable);
int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
{
@@ -506,39 +402,24 @@ int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
return PTR_ERR(rc->cfg_base);
rc->cfg_res = res;
- if (rc->quirk_detect_quiet_flag)
- cdns_pcie_detect_quiet_min_delay_set(&rc->pcie);
-
- cdns_pcie_host_enable_ptm_response(pcie);
-
- ret = cdns_pcie_start_link(pcie);
- if (ret) {
- dev_err(dev, "Failed to start link\n");
- return ret;
- }
-
- ret = cdns_pcie_host_start_link(rc);
+ ret = cdns_pcie_host_link_setup(rc);
if (ret)
- dev_dbg(dev, "PCIe link never came up\n");
+ return ret;
for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++)
rc->avail_ib_bar[bar] = true;
- ret = cdns_pcie_host_init(dev, rc);
+ ret = cdns_pcie_host_init(rc);
if (ret)
return ret;
if (!bridge->ops)
bridge->ops = &cdns_pcie_host_ops;
- ret = pci_host_probe(bridge);
- if (ret < 0)
- goto err_init;
-
- return 0;
-
- err_init:
- pm_runtime_put_sync(dev);
-
- return ret;
+ return pci_host_probe(bridge);
}
+EXPORT_SYMBOL_GPL(cdns_pcie_host_setup);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Cadence PCIe host controller driver");
+MODULE_AUTHOR("Cyrille Pitchen <cyrille.pitchen@free-electrons.com>");
diff --git a/drivers/pci/controller/cadence/pcie-cadence-hpa-regs.h b/drivers/pci/controller/cadence/pcie-cadence-hpa-regs.h
new file mode 100644
index 000000000000..026e131600de
--- /dev/null
+++ b/drivers/pci/controller/cadence/pcie-cadence-hpa-regs.h
@@ -0,0 +1,193 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Cadence PCIe controller driver.
+ *
+ * Copyright (c) 2024, Cadence Design Systems
+ * Author: Manikandan K Pillai <mpillai@cadence.com>
+ */
+#ifndef _PCIE_CADENCE_HPA_REGS_H
+#define _PCIE_CADENCE_HPA_REGS_H
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/pci-epf.h>
+#include <linux/phy/phy.h>
+#include <linux/bitfield.h>
+
+/* High Performance Architecture (HPA) PCIe controller registers */
+#define CDNS_PCIE_HPA_IP_REG_BANK 0x01000000
+#define CDNS_PCIE_HPA_IP_CFG_CTRL_REG_BANK 0x01003C00
+#define CDNS_PCIE_HPA_IP_AXI_MASTER_COMMON 0x02020000
+
+/* Address Translation Registers */
+#define CDNS_PCIE_HPA_AXI_SLAVE 0x03000000
+#define CDNS_PCIE_HPA_AXI_MASTER 0x03002000
+
+/* Root Port register base address */
+#define CDNS_PCIE_HPA_RP_BASE 0x0
+
+#define CDNS_PCIE_HPA_LM_ID 0x1420
+
+/* Endpoint Function BARs */
+#define CDNS_PCIE_HPA_LM_EP_FUNC_BAR_CFG(bar, fn) \
+ (((bar) < BAR_3) ? CDNS_PCIE_HPA_LM_EP_FUNC_BAR_CFG0(fn) : \
+ CDNS_PCIE_HPA_LM_EP_FUNC_BAR_CFG1(fn))
+#define CDNS_PCIE_HPA_LM_EP_FUNC_BAR_CFG0(pfn) (0x4000 * (pfn))
+#define CDNS_PCIE_HPA_LM_EP_FUNC_BAR_CFG1(pfn) ((0x4000 * (pfn)) + 0x04)
+#define CDNS_PCIE_HPA_LM_EP_VFUNC_BAR_CFG(bar, fn) \
+ (((bar) < BAR_3) ? CDNS_PCIE_HPA_LM_EP_VFUNC_BAR_CFG0(fn) : \
+ CDNS_PCIE_HPA_LM_EP_VFUNC_BAR_CFG1(fn))
+#define CDNS_PCIE_HPA_LM_EP_VFUNC_BAR_CFG0(vfn) ((0x4000 * (vfn)) + 0x08)
+#define CDNS_PCIE_HPA_LM_EP_VFUNC_BAR_CFG1(vfn) ((0x4000 * (vfn)) + 0x0C)
+#define CDNS_PCIE_HPA_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(f) \
+ (GENMASK(5, 0) << (0x4 + (f) * 10))
+#define CDNS_PCIE_HPA_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, a) \
+ (((a) << (4 + ((b) * 10))) & (CDNS_PCIE_HPA_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b)))
+#define CDNS_PCIE_HPA_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(f) \
+ (GENMASK(3, 0) << ((f) * 10))
+#define CDNS_PCIE_HPA_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, c) \
+ (((c) << ((b) * 10)) & (CDNS_PCIE_HPA_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b)))
+
+/* Endpoint Function Configuration Register */
+#define CDNS_PCIE_HPA_LM_EP_FUNC_CFG 0x02C0
+
+/* Root Complex BAR Configuration Register */
+#define CDNS_PCIE_HPA_LM_RC_BAR_CFG 0x14
+#define CDNS_PCIE_HPA_LM_RC_BAR_CFG_BAR0_APERTURE_MASK GENMASK(9, 4)
+#define CDNS_PCIE_HPA_LM_RC_BAR_CFG_BAR0_APERTURE(a) \
+ FIELD_PREP(CDNS_PCIE_HPA_LM_RC_BAR_CFG_BAR0_APERTURE_MASK, a)
+#define CDNS_PCIE_HPA_LM_RC_BAR_CFG_BAR0_CTRL_MASK GENMASK(3, 0)
+#define CDNS_PCIE_HPA_LM_RC_BAR_CFG_BAR0_CTRL(c) \
+ FIELD_PREP(CDNS_PCIE_HPA_LM_RC_BAR_CFG_BAR0_CTRL_MASK, c)
+#define CDNS_PCIE_HPA_LM_RC_BAR_CFG_BAR1_APERTURE_MASK GENMASK(19, 14)
+#define CDNS_PCIE_HPA_LM_RC_BAR_CFG_BAR1_APERTURE(a) \
+ FIELD_PREP(CDNS_PCIE_HPA_LM_RC_BAR_CFG_BAR1_APERTURE_MASK, a)
+#define CDNS_PCIE_HPA_LM_RC_BAR_CFG_BAR1_CTRL_MASK GENMASK(13, 10)
+#define CDNS_PCIE_HPA_LM_RC_BAR_CFG_BAR1_CTRL(c) \
+ FIELD_PREP(CDNS_PCIE_HPA_LM_RC_BAR_CFG_BAR1_CTRL_MASK, c)
+
+#define CDNS_PCIE_HPA_LM_RC_BAR_CFG_PREFETCH_MEM_ENABLE BIT(20)
+#define CDNS_PCIE_HPA_LM_RC_BAR_CFG_PREFETCH_MEM_64BITS BIT(21)
+#define CDNS_PCIE_HPA_LM_RC_BAR_CFG_IO_ENABLE BIT(22)
+#define CDNS_PCIE_HPA_LM_RC_BAR_CFG_IO_32BITS BIT(23)
+
+/* BAR control values applicable to both Endpoint Function and Root Complex */
+#define CDNS_PCIE_HPA_LM_BAR_CFG_CTRL_DISABLED 0x0
+#define CDNS_PCIE_HPA_LM_BAR_CFG_CTRL_IO_32BITS 0x3
+#define CDNS_PCIE_HPA_LM_BAR_CFG_CTRL_MEM_32BITS 0x1
+#define CDNS_PCIE_HPA_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS 0x9
+#define CDNS_PCIE_HPA_LM_BAR_CFG_CTRL_MEM_64BITS 0x5
+#define CDNS_PCIE_HPA_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS 0xD
+
+#define HPA_LM_RC_BAR_CFG_CTRL_DISABLED(bar) \
+ (CDNS_PCIE_HPA_LM_BAR_CFG_CTRL_DISABLED << ((bar) * 10))
+#define HPA_LM_RC_BAR_CFG_CTRL_IO_32BITS(bar) \
+ (CDNS_PCIE_HPA_LM_BAR_CFG_CTRL_IO_32BITS << ((bar) * 10))
+#define HPA_LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar) \
+ (CDNS_PCIE_HPA_LM_BAR_CFG_CTRL_MEM_32BITS << ((bar) * 10))
+#define HPA_LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar) \
+ (CDNS_PCIE_HPA_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS << ((bar) * 10))
+#define HPA_LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar) \
+ (CDNS_PCIE_HPA_LM_BAR_CFG_CTRL_MEM_64BITS << ((bar) * 10))
+#define HPA_LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar) \
+ (CDNS_PCIE_HPA_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS << ((bar) * 10))
+#define HPA_LM_RC_BAR_CFG_APERTURE(bar, aperture) \
+ (((aperture) - 7) << (((bar) * 10) + 4))
+
+#define CDNS_PCIE_HPA_LM_PTM_CTRL 0x0520
+#define CDNS_PCIE_HPA_LM_PTM_CTRL_PTMRSEN BIT(17)
+
+/* Root Port Registers PCI config space for root port function */
+#define CDNS_PCIE_HPA_RP_CAP_OFFSET 0xC0
+
+/* Region r Outbound AXI to PCIe Address Translation Register 0 */
+#define CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0(r) (0x1010 + ((r) & 0x1F) * 0x0080)
+#define CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0_NBITS_MASK GENMASK(5, 0)
+#define CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0_NBITS(nbits) \
+ (((nbits) - 1) & CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0_NBITS_MASK)
+#define CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK GENMASK(23, 16)
+#define CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) \
+ FIELD_PREP(CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK, devfn)
+#define CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0_BUS_MASK GENMASK(31, 24)
+#define CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0_BUS(bus) \
+ FIELD_PREP(CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0_BUS_MASK, bus)
+
+/* Region r Outbound AXI to PCIe Address Translation Register 1 */
+#define CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR1(r) (0x1014 + ((r) & 0x1F) * 0x0080)
+
+/* Region r Outbound PCIe Descriptor Register */
+#define CDNS_PCIE_HPA_AT_OB_REGION_DESC0(r) (0x1008 + ((r) & 0x1F) * 0x0080)
+#define CDNS_PCIE_HPA_AT_OB_REGION_DESC0_TYPE_MASK GENMASK(28, 24)
+#define CDNS_PCIE_HPA_AT_OB_REGION_DESC0_TYPE_MEM \
+ FIELD_PREP(CDNS_PCIE_HPA_AT_OB_REGION_DESC0_TYPE_MASK, 0x0)
+#define CDNS_PCIE_HPA_AT_OB_REGION_DESC0_TYPE_IO \
+ FIELD_PREP(CDNS_PCIE_HPA_AT_OB_REGION_DESC0_TYPE_MASK, 0x2)
+#define CDNS_PCIE_HPA_AT_OB_REGION_DESC0_TYPE_CONF_TYPE0 \
+ FIELD_PREP(CDNS_PCIE_HPA_AT_OB_REGION_DESC0_TYPE_MASK, 0x4)
+#define CDNS_PCIE_HPA_AT_OB_REGION_DESC0_TYPE_CONF_TYPE1 \
+ FIELD_PREP(CDNS_PCIE_HPA_AT_OB_REGION_DESC0_TYPE_MASK, 0x5)
+#define CDNS_PCIE_HPA_AT_OB_REGION_DESC0_TYPE_NORMAL_MSG \
+ FIELD_PREP(CDNS_PCIE_HPA_AT_OB_REGION_DESC0_TYPE_MASK, 0x10)
+
+/* Region r Outbound PCIe Descriptor Register */
+#define CDNS_PCIE_HPA_AT_OB_REGION_DESC1(r) (0x100C + ((r) & 0x1F) * 0x0080)
+#define CDNS_PCIE_HPA_AT_OB_REGION_DESC1_BUS_MASK GENMASK(31, 24)
+#define CDNS_PCIE_HPA_AT_OB_REGION_DESC1_BUS(bus) \
+ FIELD_PREP(CDNS_PCIE_HPA_AT_OB_REGION_DESC1_BUS_MASK, bus)
+#define CDNS_PCIE_HPA_AT_OB_REGION_DESC1_DEVFN_MASK GENMASK(23, 16)
+#define CDNS_PCIE_HPA_AT_OB_REGION_DESC1_DEVFN(devfn) \
+ FIELD_PREP(CDNS_PCIE_HPA_AT_OB_REGION_DESC1_DEVFN_MASK, devfn)
+
+#define CDNS_PCIE_HPA_AT_OB_REGION_CTRL0(r) (0x1018 + ((r) & 0x1F) * 0x0080)
+#define CDNS_PCIE_HPA_AT_OB_REGION_CTRL0_SUPPLY_BUS BIT(26)
+#define CDNS_PCIE_HPA_AT_OB_REGION_CTRL0_SUPPLY_DEV_FN BIT(25)
+
+/* Region r AXI Region Base Address Register 0 */
+#define CDNS_PCIE_HPA_AT_OB_REGION_CPU_ADDR0(r) (0x1000 + ((r) & 0x1F) * 0x0080)
+#define CDNS_PCIE_HPA_AT_OB_REGION_CPU_ADDR0_NBITS_MASK GENMASK(5, 0)
+#define CDNS_PCIE_HPA_AT_OB_REGION_CPU_ADDR0_NBITS(nbits) \
+ (((nbits) - 1) & CDNS_PCIE_HPA_AT_OB_REGION_CPU_ADDR0_NBITS_MASK)
+
+/* Region r AXI Region Base Address Register 1 */
+#define CDNS_PCIE_HPA_AT_OB_REGION_CPU_ADDR1(r) (0x1004 + ((r) & 0x1F) * 0x0080)
+
+/* Root Port BAR Inbound PCIe to AXI Address Translation Register */
+#define CDNS_PCIE_HPA_AT_IB_RP_BAR_ADDR0(bar) (((bar) * 0x0008))
+#define CDNS_PCIE_HPA_AT_IB_RP_BAR_ADDR0_NBITS_MASK GENMASK(5, 0)
+#define CDNS_PCIE_HPA_AT_IB_RP_BAR_ADDR0_NBITS(nbits) \
+ (((nbits) - 1) & CDNS_PCIE_HPA_AT_IB_RP_BAR_ADDR0_NBITS_MASK)
+#define CDNS_PCIE_HPA_AT_IB_RP_BAR_ADDR1(bar) (0x04 + ((bar) * 0x0008))
+
+/* AXI link down register */
+#define CDNS_PCIE_HPA_AT_LINKDOWN 0x04
+
+/*
+ * Physical Layer Configuration Register 0
+ * This register contains the parameters required for the functional
+ * setup of the physical layer.
+ */
+#define CDNS_PCIE_HPA_PHY_LAYER_CFG0 0x0400
+#define CDNS_PCIE_HPA_DETECT_QUIET_MIN_DELAY_MASK GENMASK(26, 24)
+#define CDNS_PCIE_HPA_DETECT_QUIET_MIN_DELAY(delay) \
+ FIELD_PREP(CDNS_PCIE_HPA_DETECT_QUIET_MIN_DELAY_MASK, delay)
+#define CDNS_PCIE_HPA_LINK_TRNG_EN_MASK GENMASK(27, 27)
+
+#define CDNS_PCIE_HPA_PHY_DBG_STS_REG0 0x0420
+
+#define CDNS_PCIE_HPA_RP_MAX_IB 0x3
+#define CDNS_PCIE_HPA_MAX_OB 15
+
+/* Endpoint Function BAR Inbound PCIe to AXI Address Translation Register */
+#define CDNS_PCIE_HPA_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar) (((fn) * 0x0080) + ((bar) * 0x0008))
+#define CDNS_PCIE_HPA_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar) (0x4 + ((fn) * 0x0080) + ((bar) * 0x0008))
+
+/* Miscellaneous offsets definitions */
+#define CDNS_PCIE_HPA_TAG_MANAGEMENT 0x0
+#define CDNS_PCIE_HPA_SLAVE_RESP 0x100
+
+#define I_ROOT_PORT_REQ_ID_REG 0x141c
+#define LM_HAL_SBSA_CTRL 0x1170
+
+#define I_PCIE_BUS_NUMBERS (CDNS_PCIE_HPA_RP_BASE + 0x18)
+#define CDNS_PCIE_EROM 0x18
+#endif /* _PCIE_CADENCE_HPA_REGS_H */
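As a sanity check of the field layouts above, a standalone composition of an outbound PCI_ADDR0 word for bus 1, devfn 0 and a 12-bit region, using plain-C stand-ins for the GENMASK()/FIELD_PREP() based helpers:

#include <stdio.h>

/* Stand-ins for CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0_*() */
#define NBITS(n) (((n) - 1) & 0x3Fu)                 /* bits [5:0]   */
#define DEVFN(d) (((unsigned int)(d) & 0xFF) << 16)  /* bits [23:16] */
#define BUS(b)   (((unsigned int)(b) & 0xFF) << 24)  /* bits [31:24] */

int main(void)
{
	unsigned int addr0 = NBITS(12) | DEVFN(0) | BUS(1);

	printf("%#x\n", addr0); /* 0x100000b */
	return 0;
}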
diff --git a/drivers/pci/controller/cadence/pcie-cadence-hpa.c b/drivers/pci/controller/cadence/pcie-cadence-hpa.c
new file mode 100644
index 000000000000..f60a16938265
--- /dev/null
+++ b/drivers/pci/controller/cadence/pcie-cadence-hpa.c
@@ -0,0 +1,167 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cadence PCIe controller driver.
+ *
+ * Copyright (c) 2024, Cadence Design Systems
+ * Author: Manikandan K Pillai <mpillai@cadence.com>
+ */
+#include <linux/kernel.h>
+#include <linux/of.h>
+
+#include "pcie-cadence.h"
+
+bool cdns_pcie_hpa_link_up(struct cdns_pcie *pcie)
+{
+ u32 pl_reg_val;
+
+ pl_reg_val = cdns_pcie_hpa_readl(pcie, REG_BANK_IP_REG, CDNS_PCIE_HPA_PHY_DBG_STS_REG0);
+ return pl_reg_val & BIT(0);
+}
+EXPORT_SYMBOL_GPL(cdns_pcie_hpa_link_up);
+
+void cdns_pcie_hpa_detect_quiet_min_delay_set(struct cdns_pcie *pcie)
+{
+ u32 delay = 0x3;
+ u32 ltssm_control_cap;
+
+ /* Set the LTSSM Detect Quiet state min. delay to 2ms */
+ ltssm_control_cap = cdns_pcie_hpa_readl(pcie, REG_BANK_IP_REG,
+ CDNS_PCIE_HPA_PHY_LAYER_CFG0);
+ ltssm_control_cap = ((ltssm_control_cap &
+ ~CDNS_PCIE_HPA_DETECT_QUIET_MIN_DELAY_MASK) |
+ CDNS_PCIE_HPA_DETECT_QUIET_MIN_DELAY(delay));
+
+ cdns_pcie_hpa_writel(pcie, REG_BANK_IP_REG,
+ CDNS_PCIE_HPA_PHY_LAYER_CFG0, ltssm_control_cap);
+}
+EXPORT_SYMBOL_GPL(cdns_pcie_hpa_detect_quiet_min_delay_set);
+
+void cdns_pcie_hpa_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn,
+ u32 r, bool is_io,
+ u64 cpu_addr, u64 pci_addr, size_t size)
+{
+ /*
+ * roundup_pow_of_two() returns an unsigned long, which is not suited
+ * for 64-bit values
+ */
+ u64 sz = 1ULL << fls64(size - 1);
+ int nbits = ilog2(sz);
+ u32 addr0, addr1, desc0, desc1, ctrl0;
+
+ if (nbits < 8)
+ nbits = 8;
+
+ /* Set the PCI address */
+ addr0 = CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0_NBITS(nbits) |
+ (lower_32_bits(pci_addr) & GENMASK(31, 8));
+ addr1 = upper_32_bits(pci_addr);
+
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0(r), addr0);
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR1(r), addr1);
+
+ /* Set the PCIe header descriptor */
+ if (is_io)
+ desc0 = CDNS_PCIE_HPA_AT_OB_REGION_DESC0_TYPE_IO;
+ else
+ desc0 = CDNS_PCIE_HPA_AT_OB_REGION_DESC0_TYPE_MEM;
+ desc1 = 0;
+ ctrl0 = 0;
+
+ /*
+ * Whether or not the SUPPLY bits are set in CTRL0, the PCI function
+ * number must always be programmed into Bits [23:16] of DESC1.
+ *
+ * In Root Complex mode, the function number is always 0 but in
+ * Endpoint mode, the PCIe controller may support more than one
+ * function. This function number needs to be set properly into the
+ * outbound PCIe descriptor.
+ *
+ * Besides, setting the SUPPLY_BUS and SUPPLY_DEV_FN bits of CTRL0 is
+ * mandatory in Root Complex mode: the driver then supplies the bus
+ * number in Bits [31:24] and the device/function number in
+ * Bits [23:16] of DESC1. Like the function number, the device number
+ * is always 0 in Root Complex mode.
+ *
+ * However, when in Endpoint mode, the SUPPLY bits can be left clear,
+ * so the PCIe controller uses the captured values for the bus and
+ * device numbers.
+ */
+ if (pcie->is_rc) {
+ /* The device and function numbers are always 0 */
+ desc1 = CDNS_PCIE_HPA_AT_OB_REGION_DESC1_BUS(busnr) |
+ CDNS_PCIE_HPA_AT_OB_REGION_DESC1_DEVFN(0);
+ ctrl0 = CDNS_PCIE_HPA_AT_OB_REGION_CTRL0_SUPPLY_BUS |
+ CDNS_PCIE_HPA_AT_OB_REGION_CTRL0_SUPPLY_DEV_FN;
+ } else {
+ /*
+ * Use captured values for bus and device numbers but still
+ * need to set the function number
+ */
+ desc1 |= CDNS_PCIE_HPA_AT_OB_REGION_DESC1_DEVFN(fn);
+ }
+
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_DESC0(r), desc0);
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_DESC1(r), desc1);
+
+ addr0 = CDNS_PCIE_HPA_AT_OB_REGION_CPU_ADDR0_NBITS(nbits) |
+ (lower_32_bits(cpu_addr) & GENMASK(31, 8));
+ addr1 = upper_32_bits(cpu_addr);
+
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_CPU_ADDR0(r), addr0);
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_CPU_ADDR1(r), addr1);
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_CTRL0(r), ctrl0);
+}
+EXPORT_SYMBOL_GPL(cdns_pcie_hpa_set_outbound_region);
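A standalone sketch of the DESC1/CTRL0 words the routine above programs in Root Complex versus Endpoint mode, using plain-C stand-ins for the register macros (bus and function values are illustrative):

#include <stdio.h>

#define DESC1_BUS(b)        (((unsigned int)(b) & 0xFF) << 24)
#define DESC1_DEVFN(d)      (((unsigned int)(d) & 0xFF) << 16)
#define CTRL0_SUPPLY_BUS    (1u << 26)
#define CTRL0_SUPPLY_DEV_FN (1u << 25)

int main(void)
{
	/* RC mode, bus 0: bus and devfn are supplied explicitly */
	unsigned int rc_desc1 = DESC1_BUS(0) | DESC1_DEVFN(0);
	unsigned int rc_ctrl0 = CTRL0_SUPPLY_BUS | CTRL0_SUPPLY_DEV_FN;

	/* EP mode, function 2: bus/device captured, function explicit */
	unsigned int ep_desc1 = DESC1_DEVFN(2);

	printf("rc: desc1=%#x ctrl0=%#x ep: desc1=%#x\n",
	       rc_desc1, rc_ctrl0, ep_desc1); /* 0, 0x6000000, 0x20000 */
	return 0;
}

Note that rc_ctrl0 comes out to 0x6000000, the same constant written to CTRL0 for the reserved config region in pcie-cadence-host-hpa.c.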
+
+void cdns_pcie_hpa_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie,
+ u8 busnr, u8 fn,
+ u32 r, u64 cpu_addr)
+{
+ u32 addr0, addr1, desc0, desc1, ctrl0;
+
+ desc0 = CDNS_PCIE_HPA_AT_OB_REGION_DESC0_TYPE_NORMAL_MSG;
+ desc1 = 0;
+ ctrl0 = 0;
+
+ /* See cdns_pcie_hpa_set_outbound_region() comments above */
+ if (pcie->is_rc) {
+ desc1 = CDNS_PCIE_HPA_AT_OB_REGION_DESC1_BUS(busnr) |
+ CDNS_PCIE_HPA_AT_OB_REGION_DESC1_DEVFN(0);
+ ctrl0 = CDNS_PCIE_HPA_AT_OB_REGION_CTRL0_SUPPLY_BUS |
+ CDNS_PCIE_HPA_AT_OB_REGION_CTRL0_SUPPLY_DEV_FN;
+ } else {
+ desc1 |= CDNS_PCIE_HPA_AT_OB_REGION_DESC1_DEVFN(fn);
+ }
+
+ addr0 = CDNS_PCIE_HPA_AT_OB_REGION_CPU_ADDR0_NBITS(17) |
+ (lower_32_bits(cpu_addr) & GENMASK(31, 8));
+ addr1 = upper_32_bits(cpu_addr);
+
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0(r), 0);
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR1(r), 0);
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_DESC0(r), desc0);
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_DESC1(r), desc1);
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_CPU_ADDR0(r), addr0);
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_CPU_ADDR1(r), addr1);
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_CTRL0(r), ctrl0);
+}
+EXPORT_SYMBOL_GPL(cdns_pcie_hpa_set_outbound_region_for_normal_msg);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Cadence PCIe controller driver");
diff --git a/drivers/pci/controller/cadence/pcie-cadence-lga-regs.h b/drivers/pci/controller/cadence/pcie-cadence-lga-regs.h
new file mode 100644
index 000000000000..857b2140c5d2
--- /dev/null
+++ b/drivers/pci/controller/cadence/pcie-cadence-lga-regs.h
@@ -0,0 +1,230 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Cadence PCIe controller driver.
+ *
+ * Copyright (c) 2017 Cadence
+ * Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>
+ */
+#ifndef _PCIE_CADENCE_LGA_REGS_H
+#define _PCIE_CADENCE_LGA_REGS_H
+
+#include <linux/bitfield.h>
+
+/* Parameters for the wait-for-link-up routine */
+#define LINK_WAIT_MAX_RETRIES 10
+#define LINK_WAIT_USLEEP_MIN 90000
+#define LINK_WAIT_USLEEP_MAX 100000
+
+/* Local Management Registers */
+#define CDNS_PCIE_LM_BASE 0x00100000
+
+/* Vendor ID Register */
+#define CDNS_PCIE_LM_ID (CDNS_PCIE_LM_BASE + 0x0044)
+#define CDNS_PCIE_LM_ID_VENDOR_MASK GENMASK(15, 0)
+#define CDNS_PCIE_LM_ID_VENDOR_SHIFT 0
+#define CDNS_PCIE_LM_ID_VENDOR(vid) \
+ (((vid) << CDNS_PCIE_LM_ID_VENDOR_SHIFT) & CDNS_PCIE_LM_ID_VENDOR_MASK)
+#define CDNS_PCIE_LM_ID_SUBSYS_MASK GENMASK(31, 16)
+#define CDNS_PCIE_LM_ID_SUBSYS_SHIFT 16
+#define CDNS_PCIE_LM_ID_SUBSYS(sub) \
+ (((sub) << CDNS_PCIE_LM_ID_SUBSYS_SHIFT) & CDNS_PCIE_LM_ID_SUBSYS_MASK)
+
+/* Root Port Requester ID Register */
+#define CDNS_PCIE_LM_RP_RID (CDNS_PCIE_LM_BASE + 0x0228)
+#define CDNS_PCIE_LM_RP_RID_MASK GENMASK(15, 0)
+#define CDNS_PCIE_LM_RP_RID_SHIFT 0
+#define CDNS_PCIE_LM_RP_RID_(rid) \
+ (((rid) << CDNS_PCIE_LM_RP_RID_SHIFT) & CDNS_PCIE_LM_RP_RID_MASK)
+
+/* Endpoint Bus and Device Number Register */
+#define CDNS_PCIE_LM_EP_ID (CDNS_PCIE_LM_BASE + 0x022C)
+#define CDNS_PCIE_LM_EP_ID_DEV_MASK GENMASK(4, 0)
+#define CDNS_PCIE_LM_EP_ID_DEV_SHIFT 0
+#define CDNS_PCIE_LM_EP_ID_BUS_MASK GENMASK(15, 8)
+#define CDNS_PCIE_LM_EP_ID_BUS_SHIFT 8
+
+/* Endpoint Function f BAR b Configuration Registers */
+#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG(bar, fn) \
+ (((bar) < BAR_4) ? CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn) : CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn))
+#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn) \
+ (CDNS_PCIE_LM_BASE + 0x0240 + (fn) * 0x0008)
+#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn) \
+ (CDNS_PCIE_LM_BASE + 0x0244 + (fn) * 0x0008)
+#define CDNS_PCIE_LM_EP_VFUNC_BAR_CFG(bar, fn) \
+ (((bar) < BAR_4) ? CDNS_PCIE_LM_EP_VFUNC_BAR_CFG0(fn) : CDNS_PCIE_LM_EP_VFUNC_BAR_CFG1(fn))
+#define CDNS_PCIE_LM_EP_VFUNC_BAR_CFG0(fn) \
+ (CDNS_PCIE_LM_BASE + 0x0280 + (fn) * 0x0008)
+#define CDNS_PCIE_LM_EP_VFUNC_BAR_CFG1(fn) \
+ (CDNS_PCIE_LM_BASE + 0x0284 + (fn) * 0x0008)
+#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) \
+ (GENMASK(4, 0) << ((b) * 8))
+#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, a) \
+ (((a) << ((b) * 8)) & CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b))
+#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b) \
+ (GENMASK(7, 5) << ((b) * 8))
+#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, c) \
+ (((c) << ((b) * 8 + 5)) & CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b))
+
+/* Endpoint Function Configuration Register */
+#define CDNS_PCIE_LM_EP_FUNC_CFG (CDNS_PCIE_LM_BASE + 0x02C0)
+
+/* Root Complex BAR Configuration Register */
+#define CDNS_PCIE_LM_RC_BAR_CFG (CDNS_PCIE_LM_BASE + 0x0300)
+#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_APERTURE_MASK GENMASK(5, 0)
+#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_APERTURE(a) \
+ (((a) << 0) & CDNS_PCIE_LM_RC_BAR_CFG_BAR0_APERTURE_MASK)
+#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL_MASK GENMASK(8, 6)
+#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL(c) \
+ (((c) << 6) & CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL_MASK)
+#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_APERTURE_MASK GENMASK(13, 9)
+#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_APERTURE(a) \
+ (((a) << 9) & CDNS_PCIE_LM_RC_BAR_CFG_BAR1_APERTURE_MASK)
+#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL_MASK GENMASK(16, 14)
+#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL(c) \
+ (((c) << 14) & CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL_MASK)
+#define CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_ENABLE BIT(17)
+#define CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_32BITS 0
+#define CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_64BITS BIT(18)
+#define CDNS_PCIE_LM_RC_BAR_CFG_IO_ENABLE BIT(19)
+#define CDNS_PCIE_LM_RC_BAR_CFG_IO_16BITS 0
+#define CDNS_PCIE_LM_RC_BAR_CFG_IO_32BITS BIT(20)
+#define CDNS_PCIE_LM_RC_BAR_CFG_CHECK_ENABLE BIT(31)
+
+/* BAR control values applicable to both Endpoint Function and Root Complex */
+#define CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED 0x0
+#define CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS 0x1
+#define CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS 0x4
+#define CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS 0x5
+#define CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS 0x6
+#define CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS 0x7
+
+#define LM_RC_BAR_CFG_CTRL_DISABLED(bar) \
+ (CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED << (((bar) * 8) + 6))
+#define LM_RC_BAR_CFG_CTRL_IO_32BITS(bar) \
+ (CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS << (((bar) * 8) + 6))
+#define LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar) \
+ (CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS << (((bar) * 8) + 6))
+#define LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar) \
+ (CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS << (((bar) * 8) + 6))
+#define LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar) \
+ (CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS << (((bar) * 8) + 6))
+#define LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar) \
+ (CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS << (((bar) * 8) + 6))
+#define LM_RC_BAR_CFG_APERTURE(bar, aperture) \
+ (((aperture) - 2) << ((bar) * 8))
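/*
 * Editorial worked example, not part of this header: assuming the aperture
 * field encodes ilog2(window size), as the "(aperture) - 2" packing above
 * suggests, a 64-bit prefetchable 1 MiB window on RC BAR0 (bar index 0)
 * would be composed as:
 *
 *	u32 value = LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(0) |
 *		    LM_RC_BAR_CFG_APERTURE(0, 20);
 */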
+
+/* PTM Control Register */
+#define CDNS_PCIE_LM_PTM_CTRL (CDNS_PCIE_LM_BASE + 0x0DA8)
+#define CDNS_PCIE_LM_TPM_CTRL_PTMRSEN BIT(17)
+
+/*
+ * Endpoint Function Registers (PCI configuration space for endpoint functions)
+ */
+#define CDNS_PCIE_EP_FUNC_BASE(fn) (((fn) << 12) & GENMASK(19, 12))
+
+#define CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET 0x90
+#define CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET 0xB0
+#define CDNS_PCIE_EP_FUNC_DEV_CAP_OFFSET 0xC0
+#define CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET 0x200
+
+/* Endpoint PF Registers */
+#define CDNS_PCIE_CORE_PF_I_ARI_CAP_AND_CTRL(fn) (0x144 + (fn) * 0x1000)
+#define CDNS_PCIE_ARI_CAP_NFN_MASK GENMASK(15, 8)
+
+/* Root Port Registers (PCI configuration space for the root port function) */
+#define CDNS_PCIE_RP_BASE 0x00200000
+#define CDNS_PCIE_RP_CAP_OFFSET 0xC0
+
+/* Address Translation Registers */
+#define CDNS_PCIE_AT_BASE 0x00400000
+
+/* Region r Outbound AXI to PCIe Address Translation Register 0 */
+#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r) \
+ (CDNS_PCIE_AT_BASE + 0x0000 + ((r) & 0x1F) * 0x0020)
+#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS_MASK GENMASK(5, 0)
+#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(nbits) \
+ (((nbits) - 1) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS_MASK)
+#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK GENMASK(19, 12)
+#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) \
+ (((devfn) << 12) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK)
+#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK GENMASK(27, 20)
+#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS(bus) \
+ (((bus) << 20) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK)
+
+/* Region r Outbound AXI to PCIe Address Translation Register 1 */
+#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r) \
+ (CDNS_PCIE_AT_BASE + 0x0004 + ((r) & 0x1F) * 0x0020)
+
+/* Region r Outbound PCIe Descriptor Register 0 */
+#define CDNS_PCIE_AT_OB_REGION_DESC0(r) \
+ (CDNS_PCIE_AT_BASE + 0x0008 + ((r) & 0x1F) * 0x0020)
+#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MASK GENMASK(3, 0)
+#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MEM 0x2
+#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_IO 0x6
+#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE0 0xA
+#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE1 0xB
+#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_NORMAL_MSG 0xC
+#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_VENDOR_MSG 0xD
+/* Bit 23 MUST be set in RC mode. */
+#define CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID BIT(23)
+#define CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK GENMASK(31, 24)
+#define CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(devfn) \
+ (((devfn) << 24) & CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK)
+
+/* Region r Outbound PCIe Descriptor Register 1 */
+#define CDNS_PCIE_AT_OB_REGION_DESC1(r) \
+ (CDNS_PCIE_AT_BASE + 0x000C + ((r) & 0x1F) * 0x0020)
+#define CDNS_PCIE_AT_OB_REGION_DESC1_BUS_MASK GENMASK(7, 0)
+#define CDNS_PCIE_AT_OB_REGION_DESC1_BUS(bus) \
+ ((bus) & CDNS_PCIE_AT_OB_REGION_DESC1_BUS_MASK)
+
+/* Region r AXI Region Base Address Register 0 */
+#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r) \
+ (CDNS_PCIE_AT_BASE + 0x0018 + ((r) & 0x1F) * 0x0020)
+#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS_MASK GENMASK(5, 0)
+#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(nbits) \
+ (((nbits) - 1) & CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS_MASK)
+
+/* Region r AXI Region Base Address Register 1 */
+#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r) \
+ (CDNS_PCIE_AT_BASE + 0x001C + ((r) & 0x1F) * 0x0020)
+
+/* Root Port BAR Inbound PCIe to AXI Address Translation Register */
+#define CDNS_PCIE_AT_IB_RP_BAR_ADDR0(bar) \
+ (CDNS_PCIE_AT_BASE + 0x0800 + (bar) * 0x0008)
+#define CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS_MASK GENMASK(5, 0)
+#define CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS(nbits) \
+ (((nbits) - 1) & CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS_MASK)
+#define CDNS_PCIE_AT_IB_RP_BAR_ADDR1(bar) \
+ (CDNS_PCIE_AT_BASE + 0x0804 + (bar) * 0x0008)
+
+/* AXI link down register */
+#define CDNS_PCIE_AT_LINKDOWN (CDNS_PCIE_AT_BASE + 0x0824)
+
+/* LTSSM Capabilities register */
+#define CDNS_PCIE_LTSSM_CONTROL_CAP (CDNS_PCIE_LM_BASE + 0x0054)
+#define CDNS_PCIE_DETECT_QUIET_MIN_DELAY_MASK GENMASK(2, 1)
+#define CDNS_PCIE_DETECT_QUIET_MIN_DELAY_SHIFT 1
+#define CDNS_PCIE_DETECT_QUIET_MIN_DELAY(delay) \
+ (((delay) << CDNS_PCIE_DETECT_QUIET_MIN_DELAY_SHIFT) & \
+ CDNS_PCIE_DETECT_QUIET_MIN_DELAY_MASK)
+
+#define CDNS_PCIE_RP_MAX_IB 0x3
+#define CDNS_PCIE_MAX_OB 32
+
+/* Endpoint Function BAR Inbound PCIe to AXI Address Translation Register */
+#define CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar) \
+ (CDNS_PCIE_AT_BASE + 0x0840 + (fn) * 0x0040 + (bar) * 0x0008)
+#define CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar) \
+ (CDNS_PCIE_AT_BASE + 0x0844 + (fn) * 0x0040 + (bar) * 0x0008)
+
+/* Normal/Vendor specific message access: offset inside some outbound region */
+#define CDNS_PCIE_NORMAL_MSG_ROUTING_MASK GENMASK(7, 5)
+#define CDNS_PCIE_NORMAL_MSG_ROUTING(route) \
+ (((route) << 5) & CDNS_PCIE_NORMAL_MSG_ROUTING_MASK)
+#define CDNS_PCIE_NORMAL_MSG_CODE_MASK GENMASK(15, 8)
+#define CDNS_PCIE_NORMAL_MSG_CODE(code) \
+ (((code) << 8) & CDNS_PCIE_NORMAL_MSG_CODE_MASK)
+#define CDNS_PCIE_MSG_NO_DATA BIT(16)
+
+#endif /* _PCIE_CADENCE_LGA_REGS_H */
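/*
 * Editorial usage sketch for the CDNS_PCIE_NORMAL_MSG_* fields above, not
 * part of this header. Routing 4 is "Local - terminate at receiver" and
 * 0x20 is the Assert_INTA message code, matching the enumerators this
 * series drops from pcie-cadence.h:
 *
 *	u32 msg_off = CDNS_PCIE_NORMAL_MSG_ROUTING(4) |
 *		      CDNS_PCIE_NORMAL_MSG_CODE(0x20) |
 *		      CDNS_PCIE_MSG_NO_DATA;
 *
 * The result is the offset to write within an outbound region configured
 * for normal-message TLPs.
 */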
diff --git a/drivers/pci/controller/cadence/pcie-cadence-plat.c b/drivers/pci/controller/cadence/pcie-cadence-plat.c
index bac0541317c1..b067a3296dd3 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-plat.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-plat.c
@@ -6,11 +6,10 @@
* Author: Tom Joseph <tjoseph@cadence.com>
*/
#include <linux/kernel.h>
-#include <linux/of_address.h>
+#include <linux/of.h>
#include <linux/of_pci.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
-#include <linux/of_device.h>
#include "pcie-cadence.h"
#define CDNS_PLAT_CPU_TO_BUS_ADDR 0x0FFFFFFF
@@ -18,16 +17,9 @@
/**
* struct cdns_plat_pcie - private data for this PCIe platform driver
* @pcie: Cadence PCIe controller
- * @is_rc: Set to 1 indicates the PCIe controller mode is Root Complex,
- * if 0 it is in Endpoint mode.
*/
struct cdns_plat_pcie {
struct cdns_pcie *pcie;
- bool is_rc;
-};
-
-struct cdns_plat_pcie_of_data {
- bool is_rc;
};
static const struct of_device_id cdns_plat_pcie_of_match[];
@@ -77,7 +69,6 @@ static int cdns_plat_pcie_probe(struct platform_device *pdev)
rc->pcie.dev = dev;
rc->pcie.ops = &cdns_plat_ops;
cdns_plat_pcie->pcie = &rc->pcie;
- cdns_plat_pcie->is_rc = is_rc;
ret = cdns_pcie_init_phy(dev, cdns_plat_pcie->pcie);
if (ret) {
@@ -105,7 +96,6 @@ static int cdns_plat_pcie_probe(struct platform_device *pdev)
ep->pcie.dev = dev;
ep->pcie.ops = &cdns_plat_ops;
cdns_plat_pcie->pcie = &ep->pcie;
- cdns_plat_pcie->is_rc = is_rc;
ret = cdns_pcie_init_phy(dev, cdns_plat_pcie->pcie);
if (ret) {
@@ -183,4 +173,7 @@ static struct platform_driver cdns_plat_pcie_driver = {
.probe = cdns_plat_pcie_probe,
.shutdown = cdns_plat_pcie_shutdown,
};
-builtin_platform_driver(cdns_plat_pcie_driver);
+module_platform_driver(cdns_plat_pcie_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Cadence PCIe controller platform driver");
diff --git a/drivers/pci/controller/cadence/pcie-cadence.c b/drivers/pci/controller/cadence/pcie-cadence.c
index 13c4032ca379..e6f1a4ac0fb7 100644
--- a/drivers/pci/controller/cadence/pcie-cadence.c
+++ b/drivers/pci/controller/cadence/pcie-cadence.c
@@ -4,8 +4,35 @@
// Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>
#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
#include "pcie-cadence.h"
+#include "../../pci.h"
+
+u8 cdns_pcie_find_capability(struct cdns_pcie *pcie, u8 cap)
+{
+ return PCI_FIND_NEXT_CAP(cdns_pcie_read_cfg, PCI_CAPABILITY_LIST,
+ cap, pcie);
+}
+EXPORT_SYMBOL_GPL(cdns_pcie_find_capability);
+
+u16 cdns_pcie_find_ext_capability(struct cdns_pcie *pcie, u8 cap)
+{
+ return PCI_FIND_NEXT_EXT_CAP(cdns_pcie_read_cfg, 0, cap, pcie);
+}
+EXPORT_SYMBOL_GPL(cdns_pcie_find_ext_capability);
+
+bool cdns_pcie_linkup(struct cdns_pcie *pcie)
+{
+ u32 pl_reg_val;
+
+ pl_reg_val = cdns_pcie_readl(pcie, CDNS_PCIE_LM_BASE);
+
+	/* Bit 0 of the register at the LM base reflects link-up status */
+	return pl_reg_val & BIT(0);
+}
+EXPORT_SYMBOL_GPL(cdns_pcie_linkup);
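/*
 * Editorial sketch, not part of this patch: combining the new helpers to
 * locate the PCIe capability in the controller's local config space and
 * log it once the link reports up.
 */
static void example_log_link_state(struct cdns_pcie *pcie)
{
	u8 pcie_cap = cdns_pcie_find_capability(pcie, PCI_CAP_ID_EXP);

	if (pcie_cap && cdns_pcie_linkup(pcie))
		dev_dbg(pcie->dev, "link up, PCIe cap at 0x%x\n", pcie_cap);
}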
void cdns_pcie_detect_quiet_min_delay_set(struct cdns_pcie *pcie)
{
@@ -22,6 +49,7 @@ void cdns_pcie_detect_quiet_min_delay_set(struct cdns_pcie *pcie)
cdns_pcie_writel(pcie, CDNS_PCIE_LTSSM_CONTROL_CAP, ltssm_control_cap);
}
+EXPORT_SYMBOL_GPL(cdns_pcie_detect_quiet_min_delay_set);
void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn,
u32 r, bool is_io,
@@ -89,7 +117,7 @@ void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn,
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r), desc1);
/* Set the CPU address */
- if (pcie->ops->cpu_addr_fixup)
+ if (pcie->ops && pcie->ops->cpu_addr_fixup)
cpu_addr = pcie->ops->cpu_addr_fixup(pcie, cpu_addr);
addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(nbits) |
@@ -99,6 +127,7 @@ void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn,
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), addr0);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), addr1);
}
+EXPORT_SYMBOL_GPL(cdns_pcie_set_outbound_region);
void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie,
u8 busnr, u8 fn,
@@ -119,7 +148,7 @@ void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie,
}
/* Set the CPU address */
- if (pcie->ops->cpu_addr_fixup)
+ if (pcie->ops && pcie->ops->cpu_addr_fixup)
cpu_addr = pcie->ops->cpu_addr_fixup(pcie, cpu_addr);
addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(17) |
@@ -133,6 +162,7 @@ void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie,
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), addr0);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), addr1);
}
+EXPORT_SYMBOL_GPL(cdns_pcie_set_outbound_region_for_normal_msg);
void cdns_pcie_reset_outbound_region(struct cdns_pcie *pcie, u32 r)
{
@@ -145,6 +175,7 @@ void cdns_pcie_reset_outbound_region(struct cdns_pcie *pcie, u32 r)
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), 0);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), 0);
}
+EXPORT_SYMBOL_GPL(cdns_pcie_reset_outbound_region);
void cdns_pcie_disable_phy(struct cdns_pcie *pcie)
{
@@ -155,6 +186,7 @@ void cdns_pcie_disable_phy(struct cdns_pcie *pcie)
phy_exit(pcie->phy[i]);
}
}
+EXPORT_SYMBOL_GPL(cdns_pcie_disable_phy);
int cdns_pcie_enable_phy(struct cdns_pcie *pcie)
{
@@ -183,6 +215,7 @@ err_phy:
return ret;
}
+EXPORT_SYMBOL_GPL(cdns_pcie_enable_phy);
int cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie)
{
@@ -196,7 +229,7 @@ int cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie)
phy_count = of_property_count_strings(np, "phy-names");
if (phy_count < 1) {
- dev_err(dev, "no phy-names. PHY will not be initialized\n");
+ dev_info(dev, "no \"phy-names\" property found; PHY will not be initialized\n");
pcie->phy_count = 0;
return 0;
}
@@ -242,6 +275,7 @@ err_phy:
return ret;
}
+EXPORT_SYMBOL_GPL(cdns_pcie_init_phy);
static int cdns_pcie_suspend_noirq(struct device *dev)
{
@@ -259,7 +293,7 @@ static int cdns_pcie_resume_noirq(struct device *dev)
ret = cdns_pcie_enable_phy(pcie);
if (ret) {
- dev_err(dev, "failed to enable phy\n");
+ dev_err(dev, "failed to enable PHY\n");
return ret;
}
@@ -270,3 +304,8 @@ const struct dev_pm_ops cdns_pcie_pm_ops = {
NOIRQ_SYSTEM_SLEEP_PM_OPS(cdns_pcie_suspend_noirq,
cdns_pcie_resume_noirq)
};
+EXPORT_SYMBOL_GPL(cdns_pcie_pm_ops);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Cadence PCIe controller driver");
+MODULE_AUTHOR("Cyrille Pitchen <cyrille.pitchen@free-electrons.com>");
diff --git a/drivers/pci/controller/cadence/pcie-cadence.h b/drivers/pci/controller/cadence/pcie-cadence.h
index 190786e47df9..443033c607d7 100644
--- a/drivers/pci/controller/cadence/pcie-cadence.h
+++ b/drivers/pci/controller/cadence/pcie-cadence.h
@@ -7,210 +7,12 @@
#define _PCIE_CADENCE_H
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pci-epf.h>
#include <linux/phy/phy.h>
-
-/* Parameters for the waiting for link up routine */
-#define LINK_WAIT_MAX_RETRIES 10
-#define LINK_WAIT_USLEEP_MIN 90000
-#define LINK_WAIT_USLEEP_MAX 100000
-
-/*
- * Local Management Registers
- */
-#define CDNS_PCIE_LM_BASE 0x00100000
-
-/* Vendor ID Register */
-#define CDNS_PCIE_LM_ID (CDNS_PCIE_LM_BASE + 0x0044)
-#define CDNS_PCIE_LM_ID_VENDOR_MASK GENMASK(15, 0)
-#define CDNS_PCIE_LM_ID_VENDOR_SHIFT 0
-#define CDNS_PCIE_LM_ID_VENDOR(vid) \
- (((vid) << CDNS_PCIE_LM_ID_VENDOR_SHIFT) & CDNS_PCIE_LM_ID_VENDOR_MASK)
-#define CDNS_PCIE_LM_ID_SUBSYS_MASK GENMASK(31, 16)
-#define CDNS_PCIE_LM_ID_SUBSYS_SHIFT 16
-#define CDNS_PCIE_LM_ID_SUBSYS(sub) \
- (((sub) << CDNS_PCIE_LM_ID_SUBSYS_SHIFT) & CDNS_PCIE_LM_ID_SUBSYS_MASK)
-
-/* Root Port Requestor ID Register */
-#define CDNS_PCIE_LM_RP_RID (CDNS_PCIE_LM_BASE + 0x0228)
-#define CDNS_PCIE_LM_RP_RID_MASK GENMASK(15, 0)
-#define CDNS_PCIE_LM_RP_RID_SHIFT 0
-#define CDNS_PCIE_LM_RP_RID_(rid) \
- (((rid) << CDNS_PCIE_LM_RP_RID_SHIFT) & CDNS_PCIE_LM_RP_RID_MASK)
-
-/* Endpoint Bus and Device Number Register */
-#define CDNS_PCIE_LM_EP_ID (CDNS_PCIE_LM_BASE + 0x022c)
-#define CDNS_PCIE_LM_EP_ID_DEV_MASK GENMASK(4, 0)
-#define CDNS_PCIE_LM_EP_ID_DEV_SHIFT 0
-#define CDNS_PCIE_LM_EP_ID_BUS_MASK GENMASK(15, 8)
-#define CDNS_PCIE_LM_EP_ID_BUS_SHIFT 8
-
-/* Endpoint Function f BAR b Configuration Registers */
-#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG(bar, fn) \
- (((bar) < BAR_4) ? CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn) : CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn))
-#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn) \
- (CDNS_PCIE_LM_BASE + 0x0240 + (fn) * 0x0008)
-#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn) \
- (CDNS_PCIE_LM_BASE + 0x0244 + (fn) * 0x0008)
-#define CDNS_PCIE_LM_EP_VFUNC_BAR_CFG(bar, fn) \
- (((bar) < BAR_4) ? CDNS_PCIE_LM_EP_VFUNC_BAR_CFG0(fn) : CDNS_PCIE_LM_EP_VFUNC_BAR_CFG1(fn))
-#define CDNS_PCIE_LM_EP_VFUNC_BAR_CFG0(fn) \
- (CDNS_PCIE_LM_BASE + 0x0280 + (fn) * 0x0008)
-#define CDNS_PCIE_LM_EP_VFUNC_BAR_CFG1(fn) \
- (CDNS_PCIE_LM_BASE + 0x0284 + (fn) * 0x0008)
-#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) \
- (GENMASK(4, 0) << ((b) * 8))
-#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, a) \
- (((a) << ((b) * 8)) & CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b))
-#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b) \
- (GENMASK(7, 5) << ((b) * 8))
-#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, c) \
- (((c) << ((b) * 8 + 5)) & CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b))
-
-/* Endpoint Function Configuration Register */
-#define CDNS_PCIE_LM_EP_FUNC_CFG (CDNS_PCIE_LM_BASE + 0x02c0)
-
-/* Root Complex BAR Configuration Register */
-#define CDNS_PCIE_LM_RC_BAR_CFG (CDNS_PCIE_LM_BASE + 0x0300)
-#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_APERTURE_MASK GENMASK(5, 0)
-#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_APERTURE(a) \
- (((a) << 0) & CDNS_PCIE_LM_RC_BAR_CFG_BAR0_APERTURE_MASK)
-#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL_MASK GENMASK(8, 6)
-#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL(c) \
- (((c) << 6) & CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL_MASK)
-#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_APERTURE_MASK GENMASK(13, 9)
-#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_APERTURE(a) \
- (((a) << 9) & CDNS_PCIE_LM_RC_BAR_CFG_BAR1_APERTURE_MASK)
-#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL_MASK GENMASK(16, 14)
-#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL(c) \
- (((c) << 14) & CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL_MASK)
-#define CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_ENABLE BIT(17)
-#define CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_32BITS 0
-#define CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_64BITS BIT(18)
-#define CDNS_PCIE_LM_RC_BAR_CFG_IO_ENABLE BIT(19)
-#define CDNS_PCIE_LM_RC_BAR_CFG_IO_16BITS 0
-#define CDNS_PCIE_LM_RC_BAR_CFG_IO_32BITS BIT(20)
-#define CDNS_PCIE_LM_RC_BAR_CFG_CHECK_ENABLE BIT(31)
-
-/* BAR control values applicable to both Endpoint Function and Root Complex */
-#define CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED 0x0
-#define CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS 0x1
-#define CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS 0x4
-#define CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS 0x5
-#define CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS 0x6
-#define CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS 0x7
-
-#define LM_RC_BAR_CFG_CTRL_DISABLED(bar) \
- (CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED << (((bar) * 8) + 6))
-#define LM_RC_BAR_CFG_CTRL_IO_32BITS(bar) \
- (CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS << (((bar) * 8) + 6))
-#define LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar) \
- (CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS << (((bar) * 8) + 6))
-#define LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar) \
- (CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS << (((bar) * 8) + 6))
-#define LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar) \
- (CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS << (((bar) * 8) + 6))
-#define LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar) \
- (CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS << (((bar) * 8) + 6))
-#define LM_RC_BAR_CFG_APERTURE(bar, aperture) \
- (((aperture) - 2) << ((bar) * 8))
-
-/* PTM Control Register */
-#define CDNS_PCIE_LM_PTM_CTRL (CDNS_PCIE_LM_BASE + 0x0da8)
-#define CDNS_PCIE_LM_TPM_CTRL_PTMRSEN BIT(17)
-
-/*
- * Endpoint Function Registers (PCI configuration space for endpoint functions)
- */
-#define CDNS_PCIE_EP_FUNC_BASE(fn) (((fn) << 12) & GENMASK(19, 12))
-
-#define CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET 0x90
-#define CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET 0xb0
-#define CDNS_PCIE_EP_FUNC_DEV_CAP_OFFSET 0xc0
-#define CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET 0x200
-
-/*
- * Root Port Registers (PCI configuration space for the root port function)
- */
-#define CDNS_PCIE_RP_BASE 0x00200000
-#define CDNS_PCIE_RP_CAP_OFFSET 0xc0
-
-/*
- * Address Translation Registers
- */
-#define CDNS_PCIE_AT_BASE 0x00400000
-
-/* Region r Outbound AXI to PCIe Address Translation Register 0 */
-#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r) \
- (CDNS_PCIE_AT_BASE + 0x0000 + ((r) & 0x1f) * 0x0020)
-#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS_MASK GENMASK(5, 0)
-#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(nbits) \
- (((nbits) - 1) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS_MASK)
-#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK GENMASK(19, 12)
-#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) \
- (((devfn) << 12) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK)
-#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK GENMASK(27, 20)
-#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS(bus) \
- (((bus) << 20) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK)
-
-/* Region r Outbound AXI to PCIe Address Translation Register 1 */
-#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r) \
- (CDNS_PCIE_AT_BASE + 0x0004 + ((r) & 0x1f) * 0x0020)
-
-/* Region r Outbound PCIe Descriptor Register 0 */
-#define CDNS_PCIE_AT_OB_REGION_DESC0(r) \
- (CDNS_PCIE_AT_BASE + 0x0008 + ((r) & 0x1f) * 0x0020)
-#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MASK GENMASK(3, 0)
-#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MEM 0x2
-#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_IO 0x6
-#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE0 0xa
-#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE1 0xb
-#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_NORMAL_MSG 0xc
-#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_VENDOR_MSG 0xd
-/* Bit 23 MUST be set in RC mode. */
-#define CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID BIT(23)
-#define CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK GENMASK(31, 24)
-#define CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(devfn) \
- (((devfn) << 24) & CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK)
-
-/* Region r Outbound PCIe Descriptor Register 1 */
-#define CDNS_PCIE_AT_OB_REGION_DESC1(r) \
- (CDNS_PCIE_AT_BASE + 0x000c + ((r) & 0x1f) * 0x0020)
-#define CDNS_PCIE_AT_OB_REGION_DESC1_BUS_MASK GENMASK(7, 0)
-#define CDNS_PCIE_AT_OB_REGION_DESC1_BUS(bus) \
- ((bus) & CDNS_PCIE_AT_OB_REGION_DESC1_BUS_MASK)
-
-/* Region r AXI Region Base Address Register 0 */
-#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r) \
- (CDNS_PCIE_AT_BASE + 0x0018 + ((r) & 0x1f) * 0x0020)
-#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS_MASK GENMASK(5, 0)
-#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(nbits) \
- (((nbits) - 1) & CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS_MASK)
-
-/* Region r AXI Region Base Address Register 1 */
-#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r) \
- (CDNS_PCIE_AT_BASE + 0x001c + ((r) & 0x1f) * 0x0020)
-
-/* Root Port BAR Inbound PCIe to AXI Address Translation Register */
-#define CDNS_PCIE_AT_IB_RP_BAR_ADDR0(bar) \
- (CDNS_PCIE_AT_BASE + 0x0800 + (bar) * 0x0008)
-#define CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS_MASK GENMASK(5, 0)
-#define CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS(nbits) \
- (((nbits) - 1) & CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS_MASK)
-#define CDNS_PCIE_AT_IB_RP_BAR_ADDR1(bar) \
- (CDNS_PCIE_AT_BASE + 0x0804 + (bar) * 0x0008)
-
-/* AXI link down register */
-#define CDNS_PCIE_AT_LINKDOWN (CDNS_PCIE_AT_BASE + 0x0824)
-
-/* LTSSM Capabilities register */
-#define CDNS_PCIE_LTSSM_CONTROL_CAP (CDNS_PCIE_LM_BASE + 0x0054)
-#define CDNS_PCIE_DETECT_QUIET_MIN_DELAY_MASK GENMASK(2, 1)
-#define CDNS_PCIE_DETECT_QUIET_MIN_DELAY_SHIFT 1
-#define CDNS_PCIE_DETECT_QUIET_MIN_DELAY(delay) \
- (((delay) << CDNS_PCIE_DETECT_QUIET_MIN_DELAY_SHIFT) & \
- CDNS_PCIE_DETECT_QUIET_MIN_DELAY_MASK)
+#include "pcie-cadence-lga-regs.h"
+#include "pcie-cadence-hpa-regs.h"
enum cdns_pcie_rp_bar {
RP_BAR_UNDEFINED = -1,
@@ -219,73 +21,63 @@ enum cdns_pcie_rp_bar {
RP_NO_BAR
};
-#define CDNS_PCIE_RP_MAX_IB 0x3
-#define CDNS_PCIE_MAX_OB 32
-
struct cdns_pcie_rp_ib_bar {
u64 size;
bool free;
};
-/* Endpoint Function BAR Inbound PCIe to AXI Address Translation Register */
-#define CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar) \
- (CDNS_PCIE_AT_BASE + 0x0840 + (fn) * 0x0040 + (bar) * 0x0008)
-#define CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar) \
- (CDNS_PCIE_AT_BASE + 0x0844 + (fn) * 0x0040 + (bar) * 0x0008)
-
-/* Normal/Vendor specific message access: offset inside some outbound region */
-#define CDNS_PCIE_NORMAL_MSG_ROUTING_MASK GENMASK(7, 5)
-#define CDNS_PCIE_NORMAL_MSG_ROUTING(route) \
- (((route) << 5) & CDNS_PCIE_NORMAL_MSG_ROUTING_MASK)
-#define CDNS_PCIE_NORMAL_MSG_CODE_MASK GENMASK(15, 8)
-#define CDNS_PCIE_NORMAL_MSG_CODE(code) \
- (((code) << 8) & CDNS_PCIE_NORMAL_MSG_CODE_MASK)
-#define CDNS_PCIE_MSG_NO_DATA BIT(16)
-
struct cdns_pcie;
-
-enum cdns_pcie_msg_code {
- MSG_CODE_ASSERT_INTA = 0x20,
- MSG_CODE_ASSERT_INTB = 0x21,
- MSG_CODE_ASSERT_INTC = 0x22,
- MSG_CODE_ASSERT_INTD = 0x23,
- MSG_CODE_DEASSERT_INTA = 0x24,
- MSG_CODE_DEASSERT_INTB = 0x25,
- MSG_CODE_DEASSERT_INTC = 0x26,
- MSG_CODE_DEASSERT_INTD = 0x27,
-};
-
-enum cdns_pcie_msg_routing {
- /* Route to Root Complex */
- MSG_ROUTING_TO_RC,
-
- /* Use Address Routing */
- MSG_ROUTING_BY_ADDR,
-
- /* Use ID Routing */
- MSG_ROUTING_BY_ID,
-
- /* Route as Broadcast Message from Root Complex */
- MSG_ROUTING_BCAST,
-
- /* Local message; terminate at receiver (INTx messages) */
- MSG_ROUTING_LOCAL,
-
- /* Gather & route to Root Complex (PME_TO_Ack message) */
- MSG_ROUTING_GATHER,
+struct cdns_pcie_rc;
+
+enum cdns_pcie_reg_bank {
+ REG_BANK_RP,
+ REG_BANK_IP_REG,
+ REG_BANK_IP_CFG_CTRL_REG,
+ REG_BANK_AXI_MASTER_COMMON,
+ REG_BANK_AXI_MASTER,
+ REG_BANK_AXI_SLAVE,
+ REG_BANK_AXI_HLS,
+ REG_BANK_AXI_RAS,
+ REG_BANK_AXI_DTI,
+ REG_BANKS_MAX,
};
struct cdns_pcie_ops {
- int (*start_link)(struct cdns_pcie *pcie);
- void (*stop_link)(struct cdns_pcie *pcie);
- bool (*link_up)(struct cdns_pcie *pcie);
+ int (*start_link)(struct cdns_pcie *pcie);
+ void (*stop_link)(struct cdns_pcie *pcie);
+ bool (*link_up)(struct cdns_pcie *pcie);
u64 (*cpu_addr_fixup)(struct cdns_pcie *pcie, u64 cpu_addr);
};
/**
+ * struct cdns_plat_pcie_of_data - Register bank offsets for a platform
+ * @is_rc: controller is an RC
+ * @ip_reg_bank_offset: IP register bank start offset
+ * @ip_cfg_ctrl_reg_offset: IP config control register start offset
+ * @axi_mstr_common_offset: AXI master common register start offset
+ * @axi_slave_offset: AXI slave start offset
+ * @axi_master_offset: AXI master start offset
+ * @axi_hls_offset: AXI HLS start offset
+ * @axi_ras_offset: AXI RAS start offset
+ * @axi_dti_offset: AXI DTI start offset
+ */
+struct cdns_plat_pcie_of_data {
+ u32 is_rc:1;
+ u32 ip_reg_bank_offset;
+ u32 ip_cfg_ctrl_reg_offset;
+ u32 axi_mstr_common_offset;
+ u32 axi_slave_offset;
+ u32 axi_master_offset;
+ u32 axi_hls_offset;
+ u32 axi_ras_offset;
+ u32 axi_dti_offset;
+};
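/*
 * Editorial illustration, not part of this patch: a platform glue driver
 * would describe its register banking with an instance like this. All
 * offsets below are invented placeholders, not taken from any real SoC.
 */
static const struct cdns_plat_pcie_of_data example_hpa_rc_of_data = {
	.is_rc			= 1,
	.ip_reg_bank_offset	= 0x01000000,
	.ip_cfg_ctrl_reg_offset	= 0x01008000,
	.axi_mstr_common_offset	= 0x02020000,
	.axi_slave_offset	= 0x03000000,
	.axi_master_offset	= 0x03002000,
};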
+
+/**
* struct cdns_pcie - private data for Cadence PCIe controller drivers
* @reg_base: IO mapped register base
* @mem_res: start/end offsets in the physical system memory to map PCI accesses
+ * @msg_res: Region used for sending PCIe messages
* @dev: PCIe controller
* @is_rc: tell whether the PCIe controller mode is Root Complex or Endpoint.
* @phy_count: number of supported PHY devices
@@ -293,22 +85,24 @@ struct cdns_pcie_ops {
* @link: list of pointers to corresponding device link representations
* @ops: Platform-specific ops to control various inputs from Cadence PCIe
* wrapper
+ * @cdns_pcie_reg_offsets: Register bank offsets for the SoC
*/
struct cdns_pcie {
- void __iomem *reg_base;
- struct resource *mem_res;
- struct device *dev;
- bool is_rc;
- int phy_count;
- struct phy **phy;
- struct device_link **link;
- const struct cdns_pcie_ops *ops;
+ void __iomem *reg_base;
+ struct resource *mem_res;
+ struct resource *msg_res;
+ struct device *dev;
+ bool is_rc;
+ int phy_count;
+ struct phy **phy;
+ struct device_link **link;
+ const struct cdns_pcie_ops *ops;
+ const struct cdns_plat_pcie_of_data *cdns_pcie_reg_offsets;
};
/**
* struct cdns_pcie_rc - private data for this PCIe Root Complex driver
* @pcie: Cadence PCIe controller
- * @dev: pointer to PCIe device
* @cfg_res: start/end offsets in the physical system memory to map PCI
* configuration space accesses
* @cfg_base: IO mapped window to access the PCI configuration space of a
@@ -319,6 +113,8 @@ struct cdns_pcie {
* available
* @quirk_retrain_flag: Retrain link as quirk for PCIe Gen2
* @quirk_detect_quiet_flag: LTSSM Detect Quiet min delay set as quirk
+ * @ecam_supported: Whether ECAM is supported
+ * @no_inbound_map: Set when inbound address mapping is not supported
*/
struct cdns_pcie_rc {
struct cdns_pcie pcie;
@@ -329,6 +125,8 @@ struct cdns_pcie_rc {
bool avail_ib_bar[CDNS_PCIE_RP_MAX_IB];
unsigned int quirk_retrain_flag:1;
unsigned int quirk_detect_quiet_flag:1;
+ unsigned int ecam_supported:1;
+ unsigned int no_inbound_map:1;
};
/**
@@ -347,16 +145,16 @@ struct cdns_pcie_epf {
* @max_regions: maximum number of regions supported by hardware
* @ob_region_map: bitmask of mapped outbound regions
* @ob_addr: base addresses in the AXI bus where the outbound regions start
- * @irq_phys_addr: base address on the AXI bus where the MSI/legacy IRQ
+ * @irq_phys_addr: base address on the AXI bus where the MSI/INTX IRQ
* dedicated outbound regions is mapped.
* @irq_cpu_addr: base address in the CPU space where a write access triggers
- * the sending of a memory write (MSI) / normal message (legacy
+ * the sending of a memory write (MSI) / normal message (INTX
* IRQ) TLP through the PCIe bus.
- * @irq_pci_addr: used to save the current mapping of the MSI/legacy IRQ
+ * @irq_pci_addr: used to save the current mapping of the MSI/INTX IRQ
* dedicated outbound region.
* @irq_pci_fn: the latest PCI function that has updated the mapping of
- * the MSI/legacy IRQ dedicated outbound region.
- * @irq_pending: bitmask of asserted legacy IRQs.
+ * the MSI/INTX IRQ dedicated outbound region.
+ * @irq_pending: bitmask of asserted INTX IRQs.
* @lock: spin lock to disable interrupts while modifying PCIe controller
* registers fields (RMW) accessible by both remote RC and EP to
* minimize time between read and write
@@ -374,13 +172,50 @@ struct cdns_pcie_ep {
u64 irq_pci_addr;
u8 irq_pci_fn;
u8 irq_pending;
- /* protect writing to PCI_STATUS while raising legacy interrupts */
+ /* protect writing to PCI_STATUS while raising INTX interrupts */
spinlock_t lock;
struct cdns_pcie_epf *epf;
unsigned int quirk_detect_quiet_flag:1;
unsigned int quirk_disable_flr:1;
};
+static inline u32 cdns_reg_bank_to_off(struct cdns_pcie *pcie, enum cdns_pcie_reg_bank bank)
+{
+ u32 offset = 0x0;
+
+ switch (bank) {
+ case REG_BANK_RP:
+ offset = 0;
+ break;
+ case REG_BANK_IP_REG:
+ offset = pcie->cdns_pcie_reg_offsets->ip_reg_bank_offset;
+ break;
+ case REG_BANK_IP_CFG_CTRL_REG:
+ offset = pcie->cdns_pcie_reg_offsets->ip_cfg_ctrl_reg_offset;
+ break;
+ case REG_BANK_AXI_MASTER_COMMON:
+ offset = pcie->cdns_pcie_reg_offsets->axi_mstr_common_offset;
+ break;
+ case REG_BANK_AXI_MASTER:
+ offset = pcie->cdns_pcie_reg_offsets->axi_master_offset;
+ break;
+ case REG_BANK_AXI_SLAVE:
+ offset = pcie->cdns_pcie_reg_offsets->axi_slave_offset;
+ break;
+ case REG_BANK_AXI_HLS:
+ offset = pcie->cdns_pcie_reg_offsets->axi_hls_offset;
+ break;
+ case REG_BANK_AXI_RAS:
+ offset = pcie->cdns_pcie_reg_offsets->axi_ras_offset;
+ break;
+ case REG_BANK_AXI_DTI:
+ offset = pcie->cdns_pcie_reg_offsets->axi_dti_offset;
+ break;
+ default:
+ break;
+ }
+ return offset;
+}
/* Register access */
static inline void cdns_pcie_writel(struct cdns_pcie *pcie, u32 reg, u32 value)
@@ -393,6 +228,58 @@ static inline u32 cdns_pcie_readl(struct cdns_pcie *pcie, u32 reg)
return readl(pcie->reg_base + reg);
}
+static inline void cdns_pcie_hpa_writel(struct cdns_pcie *pcie,
+ enum cdns_pcie_reg_bank bank,
+ u32 reg,
+ u32 value)
+{
+ u32 offset = cdns_reg_bank_to_off(pcie, bank);
+
+ reg += offset;
+ writel(value, pcie->reg_base + reg);
+}
+
+static inline u32 cdns_pcie_hpa_readl(struct cdns_pcie *pcie,
+ enum cdns_pcie_reg_bank bank,
+ u32 reg)
+{
+ u32 offset = cdns_reg_bank_to_off(pcie, bank);
+
+ reg += offset;
+ return readl(pcie->reg_base + reg);
+}
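/*
 * Editorial sketch, not part of this patch: a minimal read-modify-write
 * using the banked accessors. The register offset and bit are hypothetical
 * placeholders.
 */
static inline void example_hpa_set_bit(struct cdns_pcie *pcie, u32 reg, u32 bit)
{
	u32 val = cdns_pcie_hpa_readl(pcie, REG_BANK_IP_REG, reg);

	cdns_pcie_hpa_writel(pcie, REG_BANK_IP_REG, reg, val | bit);
}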
+
+static inline u16 cdns_pcie_readw(struct cdns_pcie *pcie, u32 reg)
+{
+ return readw(pcie->reg_base + reg);
+}
+
+static inline u8 cdns_pcie_readb(struct cdns_pcie *pcie, u32 reg)
+{
+ return readb(pcie->reg_base + reg);
+}
+
+static inline int cdns_pcie_read_cfg_byte(struct cdns_pcie *pcie, int where,
+ u8 *val)
+{
+ *val = cdns_pcie_readb(pcie, where);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static inline int cdns_pcie_read_cfg_word(struct cdns_pcie *pcie, int where,
+ u16 *val)
+{
+ *val = cdns_pcie_readw(pcie, where);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static inline int cdns_pcie_read_cfg_dword(struct cdns_pcie *pcie, int where,
+ u32 *val)
+{
+ *val = cdns_pcie_readl(pcie, where);
+ return PCIBIOS_SUCCESSFUL;
+}
+
static inline u32 cdns_pcie_read_sz(void __iomem *addr, int size)
{
void __iomem *aligned_addr = PTR_ALIGN_DOWN(addr, 0x4);
@@ -457,6 +344,29 @@ static inline u16 cdns_pcie_rp_readw(struct cdns_pcie *pcie, u32 reg)
return cdns_pcie_read_sz(addr, 0x2);
}
+static inline void cdns_pcie_hpa_rp_writeb(struct cdns_pcie *pcie,
+ u32 reg, u8 value)
+{
+ void __iomem *addr = pcie->reg_base + CDNS_PCIE_HPA_RP_BASE + reg;
+
+ cdns_pcie_write_sz(addr, 0x1, value);
+}
+
+static inline void cdns_pcie_hpa_rp_writew(struct cdns_pcie *pcie,
+ u32 reg, u16 value)
+{
+ void __iomem *addr = pcie->reg_base + CDNS_PCIE_HPA_RP_BASE + reg;
+
+ cdns_pcie_write_sz(addr, 0x2, value);
+}
+
+static inline u16 cdns_pcie_hpa_rp_readw(struct cdns_pcie *pcie, u32 reg)
+{
+ void __iomem *addr = pcie->reg_base + CDNS_PCIE_HPA_RP_BASE + reg;
+
+ return cdns_pcie_read_sz(addr, 0x2);
+}
+
/* Endpoint Function register access */
static inline void cdns_pcie_ep_fn_writeb(struct cdns_pcie *pcie, u8 fn,
u32 reg, u8 value)
@@ -494,7 +404,7 @@ static inline u32 cdns_pcie_ep_fn_readl(struct cdns_pcie *pcie, u8 fn, u32 reg)
static inline int cdns_pcie_start_link(struct cdns_pcie *pcie)
{
- if (pcie->ops->start_link)
+ if (pcie->ops && pcie->ops->start_link)
return pcie->ops->start_link(pcie);
return 0;
@@ -502,28 +412,51 @@ static inline int cdns_pcie_start_link(struct cdns_pcie *pcie)
static inline void cdns_pcie_stop_link(struct cdns_pcie *pcie)
{
- if (pcie->ops->stop_link)
+ if (pcie->ops && pcie->ops->stop_link)
pcie->ops->stop_link(pcie);
}
static inline bool cdns_pcie_link_up(struct cdns_pcie *pcie)
{
- if (pcie->ops->link_up)
+ if (pcie->ops && pcie->ops->link_up)
return pcie->ops->link_up(pcie);
return true;
}
-#ifdef CONFIG_PCIE_CADENCE_HOST
+#if IS_ENABLED(CONFIG_PCIE_CADENCE_HOST)
+int cdns_pcie_host_link_setup(struct cdns_pcie_rc *rc);
+int cdns_pcie_host_init(struct cdns_pcie_rc *rc);
int cdns_pcie_host_setup(struct cdns_pcie_rc *rc);
+void cdns_pcie_host_disable(struct cdns_pcie_rc *rc);
void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
int where);
+int cdns_pcie_hpa_host_setup(struct cdns_pcie_rc *rc);
#else
+static inline int cdns_pcie_host_link_setup(struct cdns_pcie_rc *rc)
+{
+ return 0;
+}
+
+static inline int cdns_pcie_host_init(struct cdns_pcie_rc *rc)
+{
+ return 0;
+}
+
static inline int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
{
return 0;
}
+static inline int cdns_pcie_hpa_host_setup(struct cdns_pcie_rc *rc)
+{
+ return 0;
+}
+
+static inline void cdns_pcie_host_disable(struct cdns_pcie_rc *rc)
+{
+}
+
static inline void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
int where)
{
@@ -531,15 +464,31 @@ static inline void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int d
}
#endif
-#ifdef CONFIG_PCIE_CADENCE_EP
+#if IS_ENABLED(CONFIG_PCIE_CADENCE_EP)
int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep);
+void cdns_pcie_ep_disable(struct cdns_pcie_ep *ep);
+int cdns_pcie_hpa_ep_setup(struct cdns_pcie_ep *ep);
#else
static inline int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
{
return 0;
}
+
+static inline void cdns_pcie_ep_disable(struct cdns_pcie_ep *ep)
+{
+}
+
+static inline int cdns_pcie_hpa_ep_setup(struct cdns_pcie_ep *ep)
+{
+ return 0;
+}
+
#endif
+u8 cdns_pcie_find_capability(struct cdns_pcie *pcie, u8 cap);
+u16 cdns_pcie_find_ext_capability(struct cdns_pcie *pcie, u8 cap);
+bool cdns_pcie_linkup(struct cdns_pcie *pcie);
+
void cdns_pcie_detect_quiet_min_delay_set(struct cdns_pcie *pcie);
void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn,
@@ -552,8 +501,23 @@ void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie,
void cdns_pcie_reset_outbound_region(struct cdns_pcie *pcie, u32 r);
void cdns_pcie_disable_phy(struct cdns_pcie *pcie);
-int cdns_pcie_enable_phy(struct cdns_pcie *pcie);
-int cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie);
+int cdns_pcie_enable_phy(struct cdns_pcie *pcie);
+int cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie);
+void cdns_pcie_hpa_detect_quiet_min_delay_set(struct cdns_pcie *pcie);
+void cdns_pcie_hpa_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn,
+ u32 r, bool is_io,
+ u64 cpu_addr, u64 pci_addr, size_t size);
+void cdns_pcie_hpa_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie,
+ u8 busnr, u8 fn,
+ u32 r, u64 cpu_addr);
+int cdns_pcie_hpa_host_link_setup(struct cdns_pcie_rc *rc);
+void __iomem *cdns_pci_hpa_map_bus(struct pci_bus *bus, unsigned int devfn,
+ int where);
+int cdns_pcie_hpa_host_start_link(struct cdns_pcie_rc *rc);
+int cdns_pcie_hpa_start_link(struct cdns_pcie *pcie);
+void cdns_pcie_hpa_stop_link(struct cdns_pcie *pcie);
+bool cdns_pcie_hpa_link_up(struct cdns_pcie *pcie);
+
extern const struct dev_pm_ops cdns_pcie_pm_ops;
#endif /* _PCIE_CADENCE_H */
diff --git a/drivers/pci/controller/cadence/pcie-sg2042.c b/drivers/pci/controller/cadence/pcie-sg2042.c
new file mode 100644
index 000000000000..0c50c74d03ee
--- /dev/null
+++ b/drivers/pci/controller/cadence/pcie-sg2042.c
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * pcie-sg2042 - PCIe controller driver for Sophgo SG2042 SoC
+ *
+ * Copyright (C) 2025 Sophgo Technology Inc.
+ * Copyright (C) 2025 Chen Wang <unicorn_wang@outlook.com>
+ */
+
+#include <linux/mod_devicetable.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include "pcie-cadence.h"
+
+/*
+ * SG2042 only supports 4-byte aligned access, so for the rootbus (i.e. to
+ * read/write the Root Port itself), read32/write32 is required. For
+ * non-rootbus (i.e. to read/write the PCIe peripheral registers), 1/2/4
+ * byte aligned access is supported, so directly using read/write should
+ * be fine.
+ */
+
+static struct pci_ops sg2042_pcie_root_ops = {
+ .map_bus = cdns_pci_map_bus,
+ .read = pci_generic_config_read32,
+ .write = pci_generic_config_write32,
+};
+
+static struct pci_ops sg2042_pcie_child_ops = {
+ .map_bus = cdns_pci_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+};
+
+static int sg2042_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct pci_host_bridge *bridge;
+ struct cdns_pcie *pcie;
+ struct cdns_pcie_rc *rc;
+ int ret;
+
+ bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rc));
+ if (!bridge)
+ return dev_err_probe(dev, -ENOMEM, "Failed to alloc host bridge!\n");
+
+ bridge->ops = &sg2042_pcie_root_ops;
+ bridge->child_ops = &sg2042_pcie_child_ops;
+
+ rc = pci_host_bridge_priv(bridge);
+ pcie = &rc->pcie;
+ pcie->dev = dev;
+
+ platform_set_drvdata(pdev, pcie);
+
+ pm_runtime_set_active(dev);
+ pm_runtime_no_callbacks(dev);
+ devm_pm_runtime_enable(dev);
+
+ ret = cdns_pcie_init_phy(dev, pcie);
+ if (ret)
+		return dev_err_probe(dev, ret, "Failed to init PHY!\n");
+
+ ret = cdns_pcie_host_setup(rc);
+ if (ret) {
+ dev_err_probe(dev, ret, "Failed to setup host!\n");
+ cdns_pcie_disable_phy(pcie);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void sg2042_pcie_remove(struct platform_device *pdev)
+{
+ struct cdns_pcie *pcie = platform_get_drvdata(pdev);
+ struct cdns_pcie_rc *rc;
+
+ rc = container_of(pcie, struct cdns_pcie_rc, pcie);
+ cdns_pcie_host_disable(rc);
+
+ cdns_pcie_disable_phy(pcie);
+}
+
+static int sg2042_pcie_suspend_noirq(struct device *dev)
+{
+ struct cdns_pcie *pcie = dev_get_drvdata(dev);
+
+ cdns_pcie_disable_phy(pcie);
+
+ return 0;
+}
+
+static int sg2042_pcie_resume_noirq(struct device *dev)
+{
+ struct cdns_pcie *pcie = dev_get_drvdata(dev);
+ int ret;
+
+ ret = cdns_pcie_enable_phy(pcie);
+ if (ret) {
+ dev_err(dev, "failed to enable PHY\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static DEFINE_NOIRQ_DEV_PM_OPS(sg2042_pcie_pm_ops,
+ sg2042_pcie_suspend_noirq,
+ sg2042_pcie_resume_noirq);
+
+static const struct of_device_id sg2042_pcie_of_match[] = {
+ { .compatible = "sophgo,sg2042-pcie-host" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, sg2042_pcie_of_match);
+
+static struct platform_driver sg2042_pcie_driver = {
+ .driver = {
+ .name = "sg2042-pcie",
+ .of_match_table = sg2042_pcie_of_match,
+ .pm = pm_sleep_ptr(&sg2042_pcie_pm_ops),
+ },
+ .probe = sg2042_pcie_probe,
+ .remove = sg2042_pcie_remove,
+};
+module_platform_driver(sg2042_pcie_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("PCIe controller driver for SG2042 SoCs");
+MODULE_AUTHOR("Chen Wang <unicorn_wang@outlook.com>");
diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig
index 99ec91e2a5cf..519b59422b47 100644
--- a/drivers/pci/controller/dwc/Kconfig
+++ b/drivers/pci/controller/dwc/Kconfig
@@ -1,139 +1,127 @@
# SPDX-License-Identifier: GPL-2.0
-menu "DesignWare PCI Core Support"
+menu "DesignWare-based PCIe controllers"
depends on PCI
config PCIE_DW
bool
+config PCIE_DW_DEBUGFS
+ bool "DesignWare PCIe debugfs entries"
+ depends on DEBUG_FS
+ depends on PCIE_DW_HOST || PCIE_DW_EP
+ help
+ Say Y here to enable debugfs entries for the PCIe controller. These
+ entries provide various debug features related to the controller and
+ expose the RAS DES capabilities such as Silicon Debug, Error Injection
+ and Statistical Counters.
+
config PCIE_DW_HOST
bool
select PCIE_DW
+ select IRQ_MSI_LIB
+ select PCI_HOST_COMMON
config PCIE_DW_EP
bool
select PCIE_DW
-config PCI_DRA7XX
- tristate
+config PCIE_AL
+ bool "Amazon Annapurna Labs PCIe controller"
+ depends on OF && (ARM64 || COMPILE_TEST)
+ depends on PCI_MSI
+ select PCIE_DW_HOST
+ select PCI_ECAM
+ help
+	  Say Y here to enable support for Amazon's Annapurna Labs PCIe
+ controller IP on Amazon SoCs. The PCIe controller uses the DesignWare
+ core plus Annapurna Labs proprietary hardware wrappers. This is
+ required only for DT-based platforms. ACPI platforms with the
+ Annapurna Labs PCIe controller don't need to enable this.
-config PCI_DRA7XX_HOST
- tristate "TI DRA7xx PCIe controller Host Mode"
- depends on SOC_DRA7XX || COMPILE_TEST
- depends on OF && HAS_IOMEM && TI_PIPE3
+config PCIE_AMD_MDB
+ bool "AMD MDB Versal2 PCIe controller"
+ depends on OF && (ARM64 || COMPILE_TEST)
depends on PCI_MSI
select PCIE_DW_HOST
- select PCI_DRA7XX
- default y if SOC_DRA7XX
help
- Enables support for the PCIe controller in the DRA7xx SoC to work in
- host mode. There are two instances of PCIe controller in DRA7xx.
- This controller can work either as EP or RC. In order to enable
- host-specific features PCI_DRA7XX_HOST must be selected and in order
- to enable device-specific features PCI_DRA7XX_EP must be selected.
- This uses the DesignWare core.
+ Say Y here if you want to enable PCIe controller support on AMD
+ Versal2 SoCs. The AMD MDB Versal2 PCIe controller is based on
+ DesignWare IP and therefore the driver re-uses the DesignWare
+ core functions to implement the driver.
-config PCI_DRA7XX_EP
- tristate "TI DRA7xx PCIe controller Endpoint Mode"
- depends on SOC_DRA7XX || COMPILE_TEST
- depends on OF && HAS_IOMEM && TI_PIPE3
- depends on PCI_ENDPOINT
- select PCIE_DW_EP
- select PCI_DRA7XX
+config PCI_MESON
+ tristate "Amlogic Meson PCIe controller"
+ default m if ARCH_MESON
+ depends on PCI_MSI
+ select PCIE_DW_HOST
help
- Enables support for the PCIe controller in the DRA7xx SoC to work in
- endpoint mode. There are two instances of PCIe controller in DRA7xx.
- This controller can work either as EP or RC. In order to enable
- host-specific features PCI_DRA7XX_HOST must be selected and in order
- to enable device-specific features PCI_DRA7XX_EP must be selected.
- This uses the DesignWare core.
+ Say Y here if you want to enable PCI controller support on Amlogic
+ SoCs. The PCI controller on Amlogic is based on DesignWare hardware
+ and therefore the driver re-uses the DesignWare core functions to
+ implement the driver.
-config PCIE_DW_PLAT
+config PCIE_ARTPEC6
bool
-config PCIE_DW_PLAT_HOST
- bool "Platform bus based DesignWare PCIe Controller - Host mode"
+config PCIE_ARTPEC6_HOST
+ bool "Axis ARTPEC-6 PCIe controller (host mode)"
+ depends on MACH_ARTPEC6 || COMPILE_TEST
depends on PCI_MSI
select PCIE_DW_HOST
- select PCIE_DW_PLAT
+ select PCIE_ARTPEC6
help
- Enables support for the PCIe controller in the Designware IP to
- work in host mode. There are two instances of PCIe controller in
- Designware IP.
- This controller can work either as EP or RC. In order to enable
- host-specific features PCIE_DW_PLAT_HOST must be selected and in
- order to enable device-specific features PCI_DW_PLAT_EP must be
- selected.
+ Enables support for the PCIe controller in the ARTPEC-6 SoC to work in
+ host mode. This uses the DesignWare core.
-config PCIE_DW_PLAT_EP
- bool "Platform bus based DesignWare PCIe Controller - Endpoint mode"
- depends on PCI && PCI_MSI
+config PCIE_ARTPEC6_EP
+ bool "Axis ARTPEC-6 PCIe controller (endpoint mode)"
+ depends on MACH_ARTPEC6 || COMPILE_TEST
depends on PCI_ENDPOINT
select PCIE_DW_EP
- select PCIE_DW_PLAT
+ select PCIE_ARTPEC6
help
- Enables support for the PCIe controller in the Designware IP to
- work in endpoint mode. There are two instances of PCIe controller
- in Designware IP.
- This controller can work either as EP or RC. In order to enable
- host-specific features PCIE_DW_PLAT_HOST must be selected and in
- order to enable device-specific features PCI_DW_PLAT_EP must be
- selected.
+ Enables support for the PCIe controller in the ARTPEC-6 SoC to work in
+ endpoint mode. This uses the DesignWare core.
-config PCI_EXYNOS
- tristate "Samsung Exynos PCIe controller"
- depends on ARCH_EXYNOS || COMPILE_TEST
+config PCIE_BT1
+ tristate "Baikal-T1 PCIe controller"
+ depends on MIPS_BAIKAL_T1 || COMPILE_TEST
depends on PCI_MSI
select PCIE_DW_HOST
help
- Enables support for the PCIe controller in the Samsung Exynos SoCs
- to work in host mode. The PCI controller is based on the DesignWare
- hardware and therefore the driver re-uses the DesignWare core
- functions to implement the driver.
+ Enables support for the PCIe controller in the Baikal-T1 SoC to work
+ in host mode. It's based on the Synopsys DWC PCIe v4.60a IP-core.
config PCI_IMX6
- bool "Freescale i.MX6/7/8 PCIe controller"
- depends on ARCH_MXC || COMPILE_TEST
- depends on PCI_MSI
- select PCIE_DW_HOST
-
-config PCIE_SPEAR13XX
- bool "STMicroelectronics SPEAr PCIe controller"
- depends on ARCH_SPEAR13XX || COMPILE_TEST
- depends on PCI_MSI
- select PCIE_DW_HOST
- help
- Say Y here if you want PCIe support on SPEAr13XX SoCs.
-
-config PCI_KEYSTONE
bool
-config PCI_KEYSTONE_HOST
- bool "PCI Keystone Host Mode"
- depends on ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST
+config PCI_IMX6_HOST
+ bool "Freescale i.MX6/7/8 PCIe controller (host mode)"
+ depends on ARCH_MXC || COMPILE_TEST
depends on PCI_MSI
select PCIE_DW_HOST
- select PCI_KEYSTONE
+ select PCI_IMX6
help
- Enables support for the PCIe controller in the Keystone SoC to
- work in host mode. The PCI controller on Keystone is based on
- DesignWare hardware and therefore the driver re-uses the
+ Enables support for the PCIe controller in the i.MX SoCs to
+ work in Root Complex mode. The PCI controller on i.MX is based
+ on DesignWare hardware and therefore the driver re-uses the
DesignWare core functions to implement the driver.
-config PCI_KEYSTONE_EP
- bool "PCI Keystone Endpoint Mode"
- depends on ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST
+config PCI_IMX6_EP
+ bool "Freescale i.MX6/7/8 PCIe controller (endpoint mode)"
+ depends on ARCH_MXC || COMPILE_TEST
depends on PCI_ENDPOINT
select PCIE_DW_EP
- select PCI_KEYSTONE
+ select PCI_IMX6
help
- Enables support for the PCIe controller in the Keystone SoC to
- work in endpoint mode. The PCI controller on Keystone is based
+ Enables support for the PCIe controller in the i.MX SoCs to
+ work in endpoint mode. The PCI controller on i.MX is based
on DesignWare hardware and therefore the driver re-uses the
DesignWare core functions to implement the driver.
config PCI_LAYERSCAPE
- bool "Freescale Layerscape PCIe controller - Host mode"
+ bool "Freescale Layerscape PCIe controller (host mode)"
depends on OF && (ARM || ARCH_LAYERSCAPE || COMPILE_TEST)
depends on PCI_MSI
select PCIE_DW_HOST
@@ -146,7 +134,7 @@ config PCI_LAYERSCAPE
controller works in RC mode.
config PCI_LAYERSCAPE_EP
- bool "Freescale Layerscape PCIe controller - Endpoint mode"
+ bool "Freescale Layerscape PCIe controller (endpoint mode)"
depends on OF && (ARM || ARCH_LAYERSCAPE || COMPILE_TEST)
depends on PCI_ENDPOINT
select PCIE_DW_EP
@@ -159,7 +147,7 @@ config PCI_LAYERSCAPE_EP
config PCI_HISI
depends on OF && (ARM64 || COMPILE_TEST)
- bool "HiSilicon Hip05 and Hip06 SoCs PCIe controllers"
+ bool "HiSilicon Hip05 and Hip06 SoCs PCIe controller"
depends on PCI_MSI
select PCIE_DW_HOST
select PCI_HOST_COMMON
@@ -167,83 +155,26 @@ config PCI_HISI
Say Y here if you want PCIe controller support on HiSilicon
Hip05 and Hip06 SoCs
-config PCIE_QCOM
- bool "Qualcomm PCIe controller"
- depends on OF && (ARCH_QCOM || COMPILE_TEST)
- depends on PCI_MSI
- select PCIE_DW_HOST
- select CRC8
- help
- Say Y here to enable PCIe controller support on Qualcomm SoCs. The
- PCIe controller uses the DesignWare core plus Qualcomm-specific
- hardware wrappers.
-
-config PCIE_QCOM_EP
- tristate "Qualcomm PCIe controller - Endpoint mode"
- depends on OF && (ARCH_QCOM || COMPILE_TEST)
- depends on PCI_ENDPOINT
- select PCIE_DW_EP
- help
- Say Y here to enable support for the PCIe controllers on Qualcomm SoCs
- to work in endpoint mode. The PCIe controller uses the DesignWare core
- plus Qualcomm-specific hardware wrappers.
-
-config PCIE_ARMADA_8K
- bool "Marvell Armada-8K PCIe controller"
- depends on ARCH_MVEBU || COMPILE_TEST
- depends on PCI_MSI
- select PCIE_DW_HOST
- help
- Say Y here if you want to enable PCIe controller support on
- Armada-8K SoCs. The PCIe controller on Armada-8K is based on
- DesignWare hardware and therefore the driver re-uses the
- DesignWare core functions to implement the driver.
-
-config PCIE_ARTPEC6
- bool
-
-config PCIE_ARTPEC6_HOST
- bool "Axis ARTPEC-6 PCIe controller Host Mode"
- depends on MACH_ARTPEC6 || COMPILE_TEST
+config PCIE_KIRIN
+ depends on OF && (ARM64 || COMPILE_TEST)
+ tristate "HiSilicon Kirin PCIe controller"
depends on PCI_MSI
select PCIE_DW_HOST
- select PCIE_ARTPEC6
+ select REGMAP_MMIO
help
- Enables support for the PCIe controller in the ARTPEC-6 SoC to work in
- host mode. This uses the DesignWare core.
-
-config PCIE_ARTPEC6_EP
- bool "Axis ARTPEC-6 PCIe controller Endpoint Mode"
- depends on MACH_ARTPEC6 || COMPILE_TEST
- depends on PCI_ENDPOINT
- select PCIE_DW_EP
- select PCIE_ARTPEC6
- help
- Enables support for the PCIe controller in the ARTPEC-6 SoC to work in
- endpoint mode. This uses the DesignWare core.
+ Say Y here if you want PCIe controller support
+ on HiSilicon Kirin series SoCs.
-config PCIE_BT1
- tristate "Baikal-T1 PCIe controller"
- depends on MIPS_BAIKAL_T1 || COMPILE_TEST
+config PCIE_HISI_STB
+ bool "HiSilicon STB PCIe controller"
+ depends on ARCH_HISI || COMPILE_TEST
depends on PCI_MSI
select PCIE_DW_HOST
help
- Enables support for the PCIe controller in the Baikal-T1 SoC to work
- in host mode. It's based on the Synopsys DWC PCIe v4.60a IP-core.
-
-config PCIE_ROCKCHIP_DW_HOST
- bool "Rockchip DesignWare PCIe controller"
- select PCIE_DW
- select PCIE_DW_HOST
- depends on PCI_MSI
- depends on ARCH_ROCKCHIP || COMPILE_TEST
- depends on OF
- help
- Enables support for the DesignWare PCIe controller in the
- Rockchip SoC except RK3399.
+	  Say Y here if you want PCIe controller support on HiSilicon STB SoCs.
config PCIE_INTEL_GW
- bool "Intel Gateway PCIe host controller support"
+ bool "Intel Gateway PCIe controller "
depends on OF && (X86 || COMPILE_TEST)
depends on PCI_MSI
select PCIE_DW_HOST
@@ -257,7 +188,7 @@ config PCIE_KEEMBAY
bool
config PCIE_KEEMBAY_HOST
- bool "Intel Keem Bay PCIe controller - Host mode"
+ bool "Intel Keem Bay PCIe controller (host mode)"
depends on ARCH_KEEMBAY || COMPILE_TEST
depends on PCI_MSI
select PCIE_DW_HOST
@@ -269,7 +200,7 @@ config PCIE_KEEMBAY_HOST
DesignWare core functions.
config PCIE_KEEMBAY_EP
- bool "Intel Keem Bay PCIe controller - Endpoint mode"
+ bool "Intel Keem Bay PCIe controller (endpoint mode)"
depends on ARCH_KEEMBAY || COMPILE_TEST
depends on PCI_MSI
depends on PCI_ENDPOINT
@@ -281,39 +212,22 @@ config PCIE_KEEMBAY_EP
The PCIe controller is based on DesignWare Hardware and uses
DesignWare core functions.
-config PCIE_KIRIN
- depends on OF && (ARM64 || COMPILE_TEST)
- tristate "HiSilicon Kirin series SoCs PCIe controllers"
- depends on PCI_MSI
- select PCIE_DW_HOST
- help
- Say Y here if you want PCIe controller support
- on HiSilicon Kirin series SoCs.
-
-config PCIE_HISI_STB
- bool "HiSilicon STB SoCs PCIe controllers"
- depends on ARCH_HISI || COMPILE_TEST
- depends on PCI_MSI
- select PCIE_DW_HOST
- help
- Say Y here if you want PCIe controller support on HiSilicon STB SoCs
-
-config PCI_MESON
- tristate "MESON PCIe controller"
- default m if ARCH_MESON
+config PCIE_ARMADA_8K
+ bool "Marvell Armada-8K PCIe controller"
+ depends on ARCH_MVEBU || COMPILE_TEST
depends on PCI_MSI
select PCIE_DW_HOST
help
- Say Y here if you want to enable PCI controller support on Amlogic
- SoCs. The PCI controller on Amlogic is based on DesignWare hardware
- and therefore the driver re-uses the DesignWare core functions to
- implement the driver.
+ Say Y here if you want to enable PCIe controller support on
+ Armada-8K SoCs. The PCIe controller on Armada-8K is based on
+ DesignWare hardware and therefore the driver re-uses the
+ DesignWare core functions to implement the driver.
config PCIE_TEGRA194
tristate
config PCIE_TEGRA194_HOST
- tristate "NVIDIA Tegra194 (and later) PCIe controller - Host Mode"
+ tristate "NVIDIA Tegra194 (and later) PCIe controller (host mode)"
depends on ARCH_TEGRA_194_SOC || COMPILE_TEST
depends on PCI_MSI
select PCIE_DW_HOST
@@ -328,7 +242,7 @@ config PCIE_TEGRA194_HOST
selected. This uses the DesignWare core.
config PCIE_TEGRA194_EP
- tristate "NVIDIA Tegra194 (and later) PCIe controller - Endpoint Mode"
+ tristate "NVIDIA Tegra194 (and later) PCIe controller (endpoint mode)"
depends on ARCH_TEGRA_194_SOC || COMPILE_TEST
depends on PCI_ENDPOINT
select PCIE_DW_EP
@@ -342,17 +256,148 @@ config PCIE_TEGRA194_EP
in order to enable device-specific features PCIE_TEGRA194_EP must be
selected. This uses the DesignWare core.
-config PCIE_VISCONTI_HOST
- bool "Toshiba Visconti PCIe controllers"
- depends on ARCH_VISCONTI || COMPILE_TEST
+config PCIE_NXP_S32G
+ bool "NXP S32G PCIe controller (host mode)"
+ depends on ARCH_S32 || COMPILE_TEST
+ select PCIE_DW_HOST
+ help
+	  Enable support for the PCIe controller in NXP S32G-based boards to
+	  work in host mode. The controller is based on DesignWare IP and
+ can work either as RC or EP. In order to enable host-specific
+ features PCIE_NXP_S32G must be selected.
+
+config PCIE_DW_PLAT
+ bool
+
+config PCIE_DW_PLAT_HOST
+ bool "Platform bus based DesignWare PCIe controller (host mode)"
depends on PCI_MSI
select PCIE_DW_HOST
+ select PCIE_DW_PLAT
help
- Say Y here if you want PCIe controller support on Toshiba Visconti SoC.
- This driver supports TMPV7708 SoC.
+	  Enables support for the PCIe controller in the DesignWare IP to
+	  work in host mode. There are two instances of the PCIe controller
+	  in the DesignWare IP.
+	  This controller can work either as EP or RC. In order to enable
+	  host-specific features PCIE_DW_PLAT_HOST must be selected and in
+	  order to enable device-specific features PCIE_DW_PLAT_EP must be
+	  selected.
+
+config PCIE_DW_PLAT_EP
+ bool "Platform bus based DesignWare PCIe controller (endpoint mode)"
+ depends on PCI && PCI_MSI
+ depends on PCI_ENDPOINT
+ select PCIE_DW_EP
+ select PCIE_DW_PLAT
+ help
+	  Enables support for the PCIe controller in the DesignWare IP to
+	  work in endpoint mode. There are two instances of the PCIe
+	  controller in the DesignWare IP.
+	  This controller can work either as EP or RC. In order to enable
+	  host-specific features PCIE_DW_PLAT_HOST must be selected and in
+	  order to enable device-specific features PCIE_DW_PLAT_EP must be
+	  selected.
+
+config PCIE_QCOM_COMMON
+ bool
+
+config PCIE_QCOM
+ bool "Qualcomm PCIe controller (host mode)"
+ depends on OF && (ARCH_QCOM || COMPILE_TEST)
+ depends on PCI_MSI
+ select PCIE_DW_HOST
+ select CRC8
+ select PCIE_QCOM_COMMON
+ select PCI_HOST_COMMON
+ select PCI_PWRCTRL_SLOT
+ help
+ Say Y here to enable PCIe controller support on Qualcomm SoCs. The
+ PCIe controller uses the DesignWare core plus Qualcomm-specific
+ hardware wrappers.
+
+config PCIE_QCOM_EP
+ tristate "Qualcomm PCIe controller (endpoint mode)"
+ depends on OF && (ARCH_QCOM || COMPILE_TEST)
+ depends on PCI_ENDPOINT
+ select PCIE_DW_EP
+ select PCIE_QCOM_COMMON
+ help
+ Say Y here to enable support for the PCIe controllers on Qualcomm SoCs
+ to work in endpoint mode. The PCIe controller uses the DesignWare core
+ plus Qualcomm-specific hardware wrappers.
+
+config PCIE_RCAR_GEN4
+ tristate
+
+config PCIE_RCAR_GEN4_HOST
+ tristate "Renesas R-Car Gen4 PCIe controller (host mode)"
+ depends on ARCH_RENESAS || COMPILE_TEST
+ depends on PCI_MSI
+ select PCIE_DW_HOST
+ select PCIE_RCAR_GEN4
+ help
+	  Say Y here if you want PCIe controller (host mode) support on R-Car Gen4 SoCs.
+ To compile this driver as a module, choose M here: the module will be
+ called pcie-rcar-gen4.ko. This uses the DesignWare core.
+
+config PCIE_RCAR_GEN4_EP
+ tristate "Renesas R-Car Gen4 PCIe controller (endpoint mode)"
+ depends on ARCH_RENESAS || COMPILE_TEST
+ depends on PCI_ENDPOINT
+ select PCIE_DW_EP
+ select PCIE_RCAR_GEN4
+ help
+	  Say Y here if you want PCIe controller (endpoint mode) support on
+	  R-Car Gen4 SoCs. To compile this driver as a module, choose M here:
+	  the module will be called pcie-rcar-gen4.ko. This uses the
+	  DesignWare core.
+
+config PCIE_ROCKCHIP_DW
+ bool
+
+config PCIE_ROCKCHIP_DW_HOST
+ bool "Rockchip DesignWare PCIe controller (host mode)"
+ depends on PCI_MSI
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
+ depends on OF
+ select PCIE_DW_HOST
+ select PCIE_ROCKCHIP_DW
+ help
+ Enables support for the DesignWare PCIe controller in the
+ Rockchip SoC (except RK3399) to work in host mode.
+
+config PCIE_ROCKCHIP_DW_EP
+ bool "Rockchip DesignWare PCIe controller (endpoint mode)"
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
+ depends on OF
+ depends on PCI_ENDPOINT
+ select PCIE_DW_EP
+ select PCIE_ROCKCHIP_DW
+ help
+ Enables support for the DesignWare PCIe controller in the
+ Rockchip SoC (except RK3399) to work in endpoint mode.
+
+config PCI_EXYNOS
+ tristate "Samsung Exynos PCIe controller"
+ depends on ARCH_EXYNOS || COMPILE_TEST
+ depends on PCI_MSI
+ select PCIE_DW_HOST
+ help
+ Enables support for the PCIe controller in the Samsung Exynos SoCs
+	  to work in host mode. The PCIe controller is based on DesignWare
+ hardware and therefore the driver re-uses the DesignWare core
+ functions to implement the driver.
+
+config PCIE_FU740
+ bool "SiFive FU740 PCIe controller"
+ depends on PCI_MSI
+ depends on ARCH_SIFIVE || COMPILE_TEST
+ select PCIE_DW_HOST
+ help
+ Say Y here if you want PCIe controller support for the SiFive
+ FU740.
config PCIE_UNIPHIER
- bool "Socionext UniPhier PCIe host controllers"
+ bool "Socionext UniPhier PCIe controller (host mode)"
depends on ARCH_UNIPHIER || COMPILE_TEST
depends on OF && HAS_IOMEM
depends on PCI_MSI
@@ -362,7 +407,7 @@ config PCIE_UNIPHIER
This driver supports LD20 and PXs3 SoCs.
config PCIE_UNIPHIER_EP
- bool "Socionext UniPhier PCIe endpoint controllers"
+ bool "Socionext UniPhier PCIe controller (endpoint mode)"
depends on ARCH_UNIPHIER || COMPILE_TEST
depends on OF && HAS_IOMEM
depends on PCI_ENDPOINT
@@ -371,26 +416,136 @@ config PCIE_UNIPHIER_EP
Say Y here if you want PCIe endpoint controller support on
UniPhier SoCs. This driver supports Pro5 SoC.
-config PCIE_AL
- bool "Amazon Annapurna Labs PCIe controller"
- depends on OF && (ARM64 || COMPILE_TEST)
+config PCIE_SOPHGO_DW
+ bool "Sophgo DesignWare PCIe controller (host mode)"
+ depends on ARCH_SOPHGO || COMPILE_TEST
depends on PCI_MSI
+ depends on OF
select PCIE_DW_HOST
- select PCI_ECAM
help
- Say Y here to enable support of the Amazon's Annapurna Labs PCIe
- controller IP on Amazon SoCs. The PCIe controller uses the DesignWare
- core plus Annapurna Labs proprietary hardware wrappers. This is
- required only for DT-based platforms. ACPI platforms with the
- Annapurna Labs PCIe controller don't need to enable this.
+ Say Y here if you want PCIe host controller support on
+ Sophgo SoCs.
-config PCIE_FU740
- bool "SiFive FU740 PCIe host controller"
+config PCIE_SPACEMIT_K1
+ tristate "SpacemiT K1 PCIe controller (host mode)"
+ depends on ARCH_SPACEMIT || COMPILE_TEST
+ depends on HAS_IOMEM
+ select PCIE_DW_HOST
+ select PCI_PWRCTRL_SLOT
+ default ARCH_SPACEMIT
+ help
+ Enables support for the DesignWare based PCIe controller in
+ the SpacemiT K1 SoC operating in host mode. Three controllers
+ are available on the K1 SoC; the first of these shares a PHY
+ with a USB 3.0 host controller (one or the other can be used).
+
+config PCIE_SPEAR13XX
+ bool "STMicroelectronics SPEAr PCIe controller"
+ depends on ARCH_SPEAR13XX || COMPILE_TEST
depends on PCI_MSI
- depends on SOC_SIFIVE || COMPILE_TEST
select PCIE_DW_HOST
help
- Say Y here if you want PCIe controller support for the SiFive
- FU740.
+ Say Y here if you want PCIe support on SPEAr13XX SoCs.
+
+config PCIE_STM32_HOST
+ tristate "STMicroelectronics STM32MP25 PCIe Controller (host mode)"
+ depends on ARCH_STM32 || COMPILE_TEST
+ depends on PCI_MSI
+ select PCIE_DW_HOST
+ help
+ Enables Root Complex (RC) support for the DesignWare core based PCIe
+	  controller found in the STM32MP25 SoC.
+
+ This driver can also be built as a module. If so, the module
+ will be called pcie-stm32.
+
+config PCIE_STM32_EP
+ tristate "STMicroelectronics STM32MP25 PCIe Controller (endpoint mode)"
+ depends on ARCH_STM32 || COMPILE_TEST
+ depends on PCI_ENDPOINT
+ select PCIE_DW_EP
+ help
+ Enables Endpoint (EP) support for the DesignWare core based PCIe
+	  controller found in the STM32MP25 SoC.
+
+ This driver can also be built as a module. If so, the module
+ will be called pcie-stm32-ep.
+
+config PCI_DRA7XX
+ tristate
+
+config PCI_DRA7XX_HOST
+ tristate "TI DRA7xx PCIe controller (host mode)"
+ depends on SOC_DRA7XX || COMPILE_TEST
+ depends on OF && HAS_IOMEM && TI_PIPE3
+ depends on PCI_MSI
+ select PCIE_DW_HOST
+ select PCI_DRA7XX
+ default y if SOC_DRA7XX
+ help
+ Enables support for the PCIe controller in the DRA7xx SoC to work in
+ host mode. There are two instances of PCIe controller in DRA7xx.
+ This controller can work either as EP or RC. In order to enable
+ host-specific features PCI_DRA7XX_HOST must be selected and in order
+ to enable device-specific features PCI_DRA7XX_EP must be selected.
+ This uses the DesignWare core.
+
+config PCI_DRA7XX_EP
+ tristate "TI DRA7xx PCIe controller (endpoint mode)"
+ depends on SOC_DRA7XX || COMPILE_TEST
+ depends on OF && HAS_IOMEM && TI_PIPE3
+ depends on PCI_ENDPOINT
+ select PCIE_DW_EP
+ select PCI_DRA7XX
+ help
+ Enables support for the PCIe controller in the DRA7xx SoC to work in
+ endpoint mode. There are two instances of PCIe controller in DRA7xx.
+ This controller can work either as EP or RC. In order to enable
+ host-specific features PCI_DRA7XX_HOST must be selected and in order
+ to enable device-specific features PCI_DRA7XX_EP must be selected.
+ This uses the DesignWare core.
+
+# ARM32 platforms use hook_fault_code() and cannot support loadable module.
+config PCI_KEYSTONE
+ bool
+
+# On non-ARM32 platforms, loadable module can be supported.
+config PCI_KEYSTONE_TRISTATE
+ tristate
+
+config PCI_KEYSTONE_HOST
+ tristate "TI Keystone PCIe controller (host mode)"
+ depends on ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST
+ depends on PCI_MSI
+ select PCIE_DW_HOST
+ select PCI_KEYSTONE if ARM
+ select PCI_KEYSTONE_TRISTATE if !ARM
+ help
+ Enables support for the PCIe controller in the Keystone SoC to
+	  work in host mode. The PCIe controller on Keystone is based on
+ DesignWare hardware and therefore the driver re-uses the
+ DesignWare core functions to implement the driver.
+
+config PCI_KEYSTONE_EP
+ tristate "TI Keystone PCIe controller (endpoint mode)"
+ depends on ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST
+ depends on PCI_ENDPOINT
+ select PCIE_DW_EP
+ select PCI_KEYSTONE if ARM
+ select PCI_KEYSTONE_TRISTATE if !ARM
+ help
+ Enables support for the PCIe controller in the Keystone SoC to
+	  work in endpoint mode. The PCIe controller on Keystone is based
+ on DesignWare hardware and therefore the driver re-uses the
+ DesignWare core functions to implement the driver.
+
+config PCIE_VISCONTI_HOST
+ bool "Toshiba Visconti PCIe controller"
+ depends on ARCH_VISCONTI || COMPILE_TEST
+ depends on PCI_MSI
+ select PCIE_DW_HOST
+ help
+ Say Y here if you want PCIe controller support on Toshiba Visconti SoC.
+ This driver supports TMPV7708 SoC.
endmenu
diff --git a/drivers/pci/controller/dwc/Makefile b/drivers/pci/controller/dwc/Makefile
index bf5c311875a1..67ba59c02038 100644
--- a/drivers/pci/controller/dwc/Makefile
+++ b/drivers/pci/controller/dwc/Makefile
@@ -1,22 +1,30 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_PCIE_DW) += pcie-designware.o
+obj-$(CONFIG_PCIE_DW_DEBUGFS) += pcie-designware-debugfs.o
obj-$(CONFIG_PCIE_DW_HOST) += pcie-designware-host.o
obj-$(CONFIG_PCIE_DW_EP) += pcie-designware-ep.o
obj-$(CONFIG_PCIE_DW_PLAT) += pcie-designware-plat.o
+obj-$(CONFIG_PCIE_AMD_MDB) += pcie-amd-mdb.o
obj-$(CONFIG_PCIE_BT1) += pcie-bt1.o
obj-$(CONFIG_PCI_DRA7XX) += pci-dra7xx.o
obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o
obj-$(CONFIG_PCIE_FU740) += pcie-fu740.o
obj-$(CONFIG_PCI_IMX6) += pci-imx6.o
+obj-$(CONFIG_PCIE_NXP_S32G) += pcie-nxp-s32g.o
obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o
+# ARM32 platforms use hook_fault_code() and cannot support loadable module.
obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone.o
+# On non-ARM32 platforms, loadable module can be supported.
+obj-$(CONFIG_PCI_KEYSTONE_TRISTATE) += pci-keystone.o
obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o
obj-$(CONFIG_PCI_LAYERSCAPE_EP) += pci-layerscape-ep.o
+obj-$(CONFIG_PCIE_QCOM_COMMON) += pcie-qcom-common.o
obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o
obj-$(CONFIG_PCIE_QCOM_EP) += pcie-qcom-ep.o
obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o
obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o
-obj-$(CONFIG_PCIE_ROCKCHIP_DW_HOST) += pcie-dw-rockchip.o
+obj-$(CONFIG_PCIE_ROCKCHIP_DW) += pcie-dw-rockchip.o
+obj-$(CONFIG_PCIE_SOPHGO_DW) += pcie-sophgo.o
obj-$(CONFIG_PCIE_INTEL_GW) += pcie-intel-gw.o
obj-$(CONFIG_PCIE_KEEMBAY) += pcie-keembay.o
obj-$(CONFIG_PCIE_KIRIN) += pcie-kirin.o
@@ -26,6 +34,10 @@ obj-$(CONFIG_PCIE_TEGRA194) += pcie-tegra194.o
obj-$(CONFIG_PCIE_UNIPHIER) += pcie-uniphier.o
obj-$(CONFIG_PCIE_UNIPHIER_EP) += pcie-uniphier-ep.o
obj-$(CONFIG_PCIE_VISCONTI_HOST) += pcie-visconti.o
+obj-$(CONFIG_PCIE_RCAR_GEN4) += pcie-rcar-gen4.o
+obj-$(CONFIG_PCIE_SPACEMIT_K1) += pcie-spacemit-k1.o
+obj-$(CONFIG_PCIE_STM32_HOST) += pcie-stm32.o
+obj-$(CONFIG_PCIE_STM32_EP) += pcie-stm32-ep.o
# The following drivers are for devices that use the generic ACPI
# pci_root.c driver but don't support standard ECAM config access.
diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c
index 38462ed11d07..01cfd9aeb0b8 100644
--- a/drivers/pci/controller/dwc/pci-dra7xx.c
+++ b/drivers/pci/controller/dwc/pci-dra7xx.c
@@ -13,11 +13,11 @@
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/of_gpio.h>
+#include <linux/of.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
@@ -113,17 +113,17 @@ static inline void dra7xx_pcie_writel(struct dra7xx_pcie *pcie, u32 offset,
writel(value, pcie->base + offset);
}
-static u64 dra7xx_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 pci_addr)
+static u64 dra7xx_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 cpu_addr)
{
- return pci_addr & DRA7XX_CPU_TO_BUS_ADDR;
+ return cpu_addr & DRA7XX_CPU_TO_BUS_ADDR;
}
-static int dra7xx_pcie_link_up(struct dw_pcie *pci)
+static bool dra7xx_pcie_link_up(struct dw_pcie *pci)
{
struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
u32 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_PHY_CS);
- return !!(reg & LINK_UP);
+ return reg & LINK_UP;
}
static void dra7xx_pcie_stop_link(struct dw_pcie *pci)
@@ -359,8 +359,8 @@ static int dra7xx_pcie_init_irq_domain(struct dw_pcie_rp *pp)
irq_set_chained_handler_and_data(pp->irq, dra7xx_pcie_msi_irq_handler,
pp);
- dra7xx->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
- &intx_domain_ops, pp);
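+	/* irq_domain_create_linear() takes a fwnode handle rather than a DT node */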
+ dra7xx->irq_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node),
+ PCI_NUM_INTX, &intx_domain_ops, pp);
of_node_put(pcie_intc_node);
if (!dra7xx->irq_domain) {
dev_err(dev, "Failed to get a INTx IRQ domain\n");
@@ -371,7 +371,7 @@ static int dra7xx_pcie_init_irq_domain(struct dw_pcie_rp *pp)
}
static const struct dw_pcie_host_ops dra7xx_pcie_host_ops = {
- .host_init = dra7xx_pcie_host_init,
+ .init = dra7xx_pcie_host_init,
};
static void dra7xx_pcie_ep_init(struct dw_pcie_ep *ep)
@@ -386,7 +386,7 @@ static void dra7xx_pcie_ep_init(struct dw_pcie_ep *ep)
dra7xx_pcie_enable_wrapper_interrupts(dra7xx);
}
-static void dra7xx_pcie_raise_legacy_irq(struct dra7xx_pcie *dra7xx)
+static void dra7xx_pcie_raise_intx_irq(struct dra7xx_pcie *dra7xx)
{
dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_ASSERT, 0x1);
mdelay(1);
@@ -404,16 +404,16 @@ static void dra7xx_pcie_raise_msi_irq(struct dra7xx_pcie *dra7xx,
}
static int dra7xx_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
- enum pci_epc_irq_type type, u16 interrupt_num)
+ unsigned int type, u16 interrupt_num)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
switch (type) {
- case PCI_EPC_IRQ_LEGACY:
- dra7xx_pcie_raise_legacy_irq(dra7xx);
+ case PCI_IRQ_INTX:
+ dra7xx_pcie_raise_intx_irq(dra7xx);
break;
- case PCI_EPC_IRQ_MSI:
+ case PCI_IRQ_MSI:
dra7xx_pcie_raise_msi_irq(dra7xx, interrupt_num);
break;
default:
@@ -426,7 +426,6 @@ static int dra7xx_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
static const struct pci_epc_features dra7xx_pcie_epc_features = {
.linkup_notifier = true,
.msi_capable = true,
- .msix_capable = false,
};
static const struct pci_epc_features*
@@ -436,7 +435,7 @@ dra7xx_pcie_get_features(struct dw_pcie_ep *ep)
}
static const struct dw_pcie_ep_ops pcie_ep_ops = {
- .ep_init = dra7xx_pcie_ep_init,
+ .init = dra7xx_pcie_ep_init,
.raise_irq = dra7xx_pcie_raise_irq,
.get_features = dra7xx_pcie_get_features,
};
@@ -467,6 +466,15 @@ static int dra7xx_add_pcie_ep(struct dra7xx_pcie *dra7xx,
return ret;
}
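+	/*
+	 * Program the DWC endpoint registers now that the EP core is set
+	 * up, then notify endpoint function drivers that the EPC is ready.
+	 */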
+ ret = dw_pcie_ep_init_registers(ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DWC endpoint registers\n");
+ dw_pcie_ep_deinit(ep);
+ return ret;
+ }
+
+ pci_epc_init_notify(ep->epc);
+
return 0;
}
@@ -626,30 +634,20 @@ static int dra7xx_pcie_unaligned_memaccess(struct device *dev)
{
int ret;
struct device_node *np = dev->of_node;
- struct of_phandle_args args;
+ unsigned int args[2];
struct regmap *regmap;
- regmap = syscon_regmap_lookup_by_phandle(np,
- "ti,syscon-unaligned-access");
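+	/*
+	 * syscon_regmap_lookup_by_phandle_args() returns the syscon regmap
+	 * and parses the two phandle arguments in a single call, replacing
+	 * the separate of_parse_phandle_with_fixed_args() lookup.
+	 */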
+ regmap = syscon_regmap_lookup_by_phandle_args(np, "ti,syscon-unaligned-access",
+ 2, args);
if (IS_ERR(regmap)) {
dev_dbg(dev, "can't get ti,syscon-unaligned-access\n");
return -EINVAL;
}
- ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-unaligned-access",
- 2, 0, &args);
- if (ret) {
- dev_err(dev, "failed to parse ti,syscon-unaligned-access\n");
- return ret;
- }
-
- ret = regmap_update_bits(regmap, args.args[0], args.args[1],
- args.args[1]);
+ ret = regmap_update_bits(regmap, args[0], args[1], args[1]);
if (ret)
dev_err(dev, "failed to enable unaligned access\n");
- of_node_put(args.np);
-
return ret;
}
@@ -662,18 +660,13 @@ static int dra7xx_pcie_configure_two_lane(struct device *dev,
u32 mask;
u32 val;
- pcie_syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-lane-sel");
+ pcie_syscon = syscon_regmap_lookup_by_phandle_args(np, "ti,syscon-lane-sel",
+ 1, &pcie_reg);
if (IS_ERR(pcie_syscon)) {
dev_err(dev, "unable to get ti,syscon-lane-sel\n");
return -EINVAL;
}
- if (of_property_read_u32_index(np, "ti,syscon-lane-sel", 1,
- &pcie_reg)) {
- dev_err(dev, "couldn't get lane selection reg offset\n");
- return -EINVAL;
- }
-
mask = b1co_mode_sel_mask | PCIE_B0_B1_TSYNCEN;
val = PCIE_B1C0_MODE_SEL | PCIE_B0_B1_TSYNCEN;
regmap_update_bits(pcie_syscon, pcie_reg, mask, val);
@@ -840,15 +833,22 @@ static int dra7xx_pcie_probe(struct platform_device *pdev)
}
dra7xx->mode = mode;
- ret = devm_request_irq(dev, irq, dra7xx_pcie_irq_handler,
- IRQF_SHARED, "dra7xx-pcie-main", dra7xx);
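+	/*
+	 * Use a threaded handler; IRQF_ONESHOT keeps the line masked until
+	 * the handler thread has completed.
+	 */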
+ ret = devm_request_threaded_irq(dev, irq, NULL, dra7xx_pcie_irq_handler,
+ IRQF_SHARED | IRQF_ONESHOT,
+ "dra7xx-pcie-main", dra7xx);
if (ret) {
dev_err(dev, "failed to request irq\n");
- goto err_gpio;
+ goto err_deinit;
}
return 0;
+err_deinit:
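+	/* Undo whichever mode-specific init completed earlier in probe */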
+ if (dra7xx->mode == DW_PCIE_RC_TYPE)
+ dw_pcie_host_deinit(&dra7xx->pci->pp);
+ else
+ dw_pcie_ep_deinit(&dra7xx->pci->ep);
+
err_gpio:
err_get_sync:
pm_runtime_put(dev);
diff --git a/drivers/pci/controller/dwc/pci-exynos.c b/drivers/pci/controller/dwc/pci-exynos.c
index ec5611005566..0bb7d4f5d784 100644
--- a/drivers/pci/controller/dwc/pci-exynos.c
+++ b/drivers/pci/controller/dwc/pci-exynos.c
@@ -14,11 +14,11 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/of_device.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/regulator/consumer.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include "pcie-designware.h"
@@ -53,44 +53,11 @@
struct exynos_pcie {
struct dw_pcie pci;
- void __iomem *elbi_base;
- struct clk *clk;
- struct clk *bus_clk;
+ struct clk_bulk_data *clks;
struct phy *phy;
struct regulator_bulk_data supplies[2];
};
-static int exynos_pcie_init_clk_resources(struct exynos_pcie *ep)
-{
- struct device *dev = ep->pci.dev;
- int ret;
-
- ret = clk_prepare_enable(ep->clk);
- if (ret) {
- dev_err(dev, "cannot enable pcie rc clock");
- return ret;
- }
-
- ret = clk_prepare_enable(ep->bus_clk);
- if (ret) {
- dev_err(dev, "cannot enable pcie bus clock");
- goto err_bus_clk;
- }
-
- return 0;
-
-err_bus_clk:
- clk_disable_unprepare(ep->clk);
-
- return ret;
-}
-
-static void exynos_pcie_deinit_clk_resources(struct exynos_pcie *ep)
-{
- clk_disable_unprepare(ep->bus_clk);
- clk_disable_unprepare(ep->clk);
-}
-
static void exynos_pcie_writel(void __iomem *base, u32 val, u32 reg)
{
writel(val, base + reg);
@@ -103,73 +70,78 @@ static u32 exynos_pcie_readl(void __iomem *base, u32 reg)
static void exynos_pcie_sideband_dbi_w_mode(struct exynos_pcie *ep, bool on)
{
+ struct dw_pcie *pci = &ep->pci;
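+	/* The ELBI register space is now held by the DWC core as pci->elbi_base */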
u32 val;
- val = exynos_pcie_readl(ep->elbi_base, PCIE_ELBI_SLV_AWMISC);
+ val = exynos_pcie_readl(pci->elbi_base, PCIE_ELBI_SLV_AWMISC);
if (on)
val |= PCIE_ELBI_SLV_DBI_ENABLE;
else
val &= ~PCIE_ELBI_SLV_DBI_ENABLE;
- exynos_pcie_writel(ep->elbi_base, val, PCIE_ELBI_SLV_AWMISC);
+ exynos_pcie_writel(pci->elbi_base, val, PCIE_ELBI_SLV_AWMISC);
}
static void exynos_pcie_sideband_dbi_r_mode(struct exynos_pcie *ep, bool on)
{
+ struct dw_pcie *pci = &ep->pci;
u32 val;
- val = exynos_pcie_readl(ep->elbi_base, PCIE_ELBI_SLV_ARMISC);
+ val = exynos_pcie_readl(pci->elbi_base, PCIE_ELBI_SLV_ARMISC);
if (on)
val |= PCIE_ELBI_SLV_DBI_ENABLE;
else
val &= ~PCIE_ELBI_SLV_DBI_ENABLE;
- exynos_pcie_writel(ep->elbi_base, val, PCIE_ELBI_SLV_ARMISC);
+ exynos_pcie_writel(pci->elbi_base, val, PCIE_ELBI_SLV_ARMISC);
}
static void exynos_pcie_assert_core_reset(struct exynos_pcie *ep)
{
+ struct dw_pcie *pci = &ep->pci;
u32 val;
- val = exynos_pcie_readl(ep->elbi_base, PCIE_CORE_RESET);
+ val = exynos_pcie_readl(pci->elbi_base, PCIE_CORE_RESET);
val &= ~PCIE_CORE_RESET_ENABLE;
- exynos_pcie_writel(ep->elbi_base, val, PCIE_CORE_RESET);
- exynos_pcie_writel(ep->elbi_base, 0, PCIE_STICKY_RESET);
- exynos_pcie_writel(ep->elbi_base, 0, PCIE_NONSTICKY_RESET);
+ exynos_pcie_writel(pci->elbi_base, val, PCIE_CORE_RESET);
+ exynos_pcie_writel(pci->elbi_base, 0, PCIE_STICKY_RESET);
+ exynos_pcie_writel(pci->elbi_base, 0, PCIE_NONSTICKY_RESET);
}
static void exynos_pcie_deassert_core_reset(struct exynos_pcie *ep)
{
+ struct dw_pcie *pci = &ep->pci;
u32 val;
- val = exynos_pcie_readl(ep->elbi_base, PCIE_CORE_RESET);
+ val = exynos_pcie_readl(pci->elbi_base, PCIE_CORE_RESET);
val |= PCIE_CORE_RESET_ENABLE;
- exynos_pcie_writel(ep->elbi_base, val, PCIE_CORE_RESET);
- exynos_pcie_writel(ep->elbi_base, 1, PCIE_STICKY_RESET);
- exynos_pcie_writel(ep->elbi_base, 1, PCIE_NONSTICKY_RESET);
- exynos_pcie_writel(ep->elbi_base, 1, PCIE_APP_INIT_RESET);
- exynos_pcie_writel(ep->elbi_base, 0, PCIE_APP_INIT_RESET);
+ exynos_pcie_writel(pci->elbi_base, val, PCIE_CORE_RESET);
+ exynos_pcie_writel(pci->elbi_base, 1, PCIE_STICKY_RESET);
+ exynos_pcie_writel(pci->elbi_base, 1, PCIE_NONSTICKY_RESET);
+ exynos_pcie_writel(pci->elbi_base, 1, PCIE_APP_INIT_RESET);
+ exynos_pcie_writel(pci->elbi_base, 0, PCIE_APP_INIT_RESET);
}
static int exynos_pcie_start_link(struct dw_pcie *pci)
{
- struct exynos_pcie *ep = to_exynos_pcie(pci);
u32 val;
- val = exynos_pcie_readl(ep->elbi_base, PCIE_SW_WAKE);
+ val = exynos_pcie_readl(pci->elbi_base, PCIE_SW_WAKE);
val &= ~PCIE_BUS_EN;
- exynos_pcie_writel(ep->elbi_base, val, PCIE_SW_WAKE);
+ exynos_pcie_writel(pci->elbi_base, val, PCIE_SW_WAKE);
/* assert LTSSM enable */
- exynos_pcie_writel(ep->elbi_base, PCIE_ELBI_LTSSM_ENABLE,
+ exynos_pcie_writel(pci->elbi_base, PCIE_ELBI_LTSSM_ENABLE,
PCIE_APP_LTSSM_ENABLE);
return 0;
}
static void exynos_pcie_clear_irq_pulse(struct exynos_pcie *ep)
{
- u32 val = exynos_pcie_readl(ep->elbi_base, PCIE_IRQ_PULSE);
+ struct dw_pcie *pci = &ep->pci;
- exynos_pcie_writel(ep->elbi_base, val, PCIE_IRQ_PULSE);
+ u32 val = exynos_pcie_readl(pci->elbi_base, PCIE_IRQ_PULSE);
+
+ exynos_pcie_writel(pci->elbi_base, val, PCIE_IRQ_PULSE);
}
static irqreturn_t exynos_pcie_irq_handler(int irq, void *arg)
@@ -182,12 +154,14 @@ static irqreturn_t exynos_pcie_irq_handler(int irq, void *arg)
static void exynos_pcie_enable_irq_pulse(struct exynos_pcie *ep)
{
+ struct dw_pcie *pci = &ep->pci;
+
u32 val = IRQ_INTA_ASSERT | IRQ_INTB_ASSERT |
IRQ_INTC_ASSERT | IRQ_INTD_ASSERT;
- exynos_pcie_writel(ep->elbi_base, val, PCIE_IRQ_EN_PULSE);
- exynos_pcie_writel(ep->elbi_base, 0, PCIE_IRQ_EN_LEVEL);
- exynos_pcie_writel(ep->elbi_base, 0, PCIE_IRQ_EN_SPECIAL);
+ exynos_pcie_writel(pci->elbi_base, val, PCIE_IRQ_EN_PULSE);
+ exynos_pcie_writel(pci->elbi_base, 0, PCIE_IRQ_EN_LEVEL);
+ exynos_pcie_writel(pci->elbi_base, 0, PCIE_IRQ_EN_SPECIAL);
}
static u32 exynos_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base,
@@ -241,12 +215,11 @@ static struct pci_ops exynos_pci_ops = {
.write = exynos_pcie_wr_own_conf,
};
-static int exynos_pcie_link_up(struct dw_pcie *pci)
+static bool exynos_pcie_link_up(struct dw_pcie *pci)
{
- struct exynos_pcie *ep = to_exynos_pcie(pci);
- u32 val = exynos_pcie_readl(ep->elbi_base, PCIE_ELBI_RDLH_LINKUP);
+ u32 val = exynos_pcie_readl(pci->elbi_base, PCIE_ELBI_RDLH_LINKUP);
- return (val & PCIE_ELBI_XMLH_LINKUP);
+ return val & PCIE_ELBI_XMLH_LINKUP;
}
static int exynos_pcie_host_init(struct dw_pcie_rp *pp)
@@ -268,7 +241,7 @@ static int exynos_pcie_host_init(struct dw_pcie_rp *pp)
}
static const struct dw_pcie_host_ops exynos_pcie_host_ops = {
- .host_init = exynos_pcie_host_init,
+ .init = exynos_pcie_host_init,
};
static int exynos_add_pcie_port(struct exynos_pcie *ep,
@@ -327,22 +300,9 @@ static int exynos_pcie_probe(struct platform_device *pdev)
if (IS_ERR(ep->phy))
return PTR_ERR(ep->phy);
- /* External Local Bus interface (ELBI) registers */
- ep->elbi_base = devm_platform_ioremap_resource_byname(pdev, "elbi");
- if (IS_ERR(ep->elbi_base))
- return PTR_ERR(ep->elbi_base);
-
- ep->clk = devm_clk_get(dev, "pcie");
- if (IS_ERR(ep->clk)) {
- dev_err(dev, "Failed to get pcie rc clock\n");
- return PTR_ERR(ep->clk);
- }
-
- ep->bus_clk = devm_clk_get(dev, "pcie_bus");
- if (IS_ERR(ep->bus_clk)) {
- dev_err(dev, "Failed to get pcie bus clock\n");
- return PTR_ERR(ep->bus_clk);
- }
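+	/*
+	 * Fetch and enable all clocks described in the device tree in one
+	 * devres-managed call; they are disabled and released automatically
+	 * on driver detach.
+	 */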
+ ret = devm_clk_bulk_get_all_enabled(dev, &ep->clks);
+ if (ret < 0)
+ return ret;
ep->supplies[0].supply = "vdd18";
ep->supplies[1].supply = "vdd10";
@@ -351,10 +311,6 @@ static int exynos_pcie_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = exynos_pcie_init_clk_resources(ep);
- if (ret)
- return ret;
-
ret = regulator_bulk_enable(ARRAY_SIZE(ep->supplies), ep->supplies);
if (ret)
return ret;
@@ -369,13 +325,12 @@ static int exynos_pcie_probe(struct platform_device *pdev)
fail_probe:
phy_exit(ep->phy);
- exynos_pcie_deinit_clk_resources(ep);
regulator_bulk_disable(ARRAY_SIZE(ep->supplies), ep->supplies);
return ret;
}
-static int __exit exynos_pcie_remove(struct platform_device *pdev)
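+/* The platform bus expects remove() callbacks to return void */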
+static void exynos_pcie_remove(struct platform_device *pdev)
{
struct exynos_pcie *ep = platform_get_drvdata(pdev);
@@ -383,10 +338,7 @@ static int __exit exynos_pcie_remove(struct platform_device *pdev)
exynos_pcie_assert_core_reset(ep);
phy_power_off(ep->phy);
phy_exit(ep->phy);
- exynos_pcie_deinit_clk_resources(ep);
regulator_bulk_disable(ARRAY_SIZE(ep->supplies), ep->supplies);
-
- return 0;
}
static int exynos_pcie_suspend_noirq(struct device *dev)
@@ -431,7 +383,7 @@ static const struct of_device_id exynos_pcie_of_match[] = {
static struct platform_driver exynos_pcie_driver = {
.probe = exynos_pcie_probe,
- .remove = __exit_p(exynos_pcie_remove),
+ .remove = exynos_pcie_remove,
.driver = {
.name = "exynos-pcie",
.of_match_table = exynos_pcie_of_match,
@@ -439,5 +391,6 @@ static struct platform_driver exynos_pcie_driver = {
},
};
module_platform_driver(exynos_pcie_driver);
+MODULE_DESCRIPTION("Samsung Exynos PCIe host controller driver");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(of, exynos_pcie_of_match);
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index 1dde5c579edc..4668fc9648bf 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -11,14 +11,13 @@
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
#include <linux/mfd/syscon/imx7-iomuxc-gpr.h>
#include <linux/module.h>
-#include <linux/of_gpio.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
@@ -29,10 +28,12 @@
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/reset.h>
+#include <linux/phy/pcie.h>
#include <linux/phy/phy.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
+#include "../../pci.h"
#include "pcie-designware.h"
#define IMX8MQ_GPR_PCIE_REF_USE_PAD BIT(9)
@@ -40,11 +41,47 @@
#define IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE BIT(11)
#define IMX8MQ_GPR_PCIE_VREG_BYPASS BIT(12)
#define IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE GENMASK(11, 8)
-#define IMX8MQ_PCIE2_BASE_ADDR 0x33c00000
-#define to_imx6_pcie(x) dev_get_drvdata((x)->dev)
+#define IMX95_PCIE_PHY_GEN_CTRL 0x0
+#define IMX95_PCIE_REF_USE_PAD BIT(17)
-enum imx6_pcie_variants {
+#define IMX95_PCIE_PHY_MPLLA_CTRL 0x10
+#define IMX95_PCIE_PHY_MPLL_STATE BIT(30)
+
+#define IMX95_PCIE_SS_RW_REG_0 0xf0
+#define IMX95_PCIE_REF_CLKEN BIT(23)
+#define IMX95_PCIE_PHY_CR_PARA_SEL BIT(9)
+#define IMX95_PCIE_SS_RW_REG_1 0xf4
+#define IMX95_PCIE_SYS_AUX_PWR_DET BIT(31)
+
+#define IMX95_PE0_GEN_CTRL_1 0x1050
+#define IMX95_PCIE_DEVICE_TYPE GENMASK(3, 0)
+
+#define IMX95_PE0_GEN_CTRL_3 0x1058
+#define IMX95_PCIE_LTSSM_EN BIT(0)
+
+#define IMX95_PE0_LUT_ACSCTRL 0x1008
+#define IMX95_PEO_LUT_RWA BIT(16)
+#define IMX95_PE0_LUT_ENLOC GENMASK(4, 0)
+
+#define IMX95_PE0_LUT_DATA1 0x100c
+#define IMX95_PE0_LUT_VLD BIT(31)
+#define IMX95_PE0_LUT_DAC_ID GENMASK(10, 8)
+#define IMX95_PE0_LUT_STREAM_ID GENMASK(5, 0)
+
+#define IMX95_PE0_LUT_DATA2 0x1010
+#define IMX95_PE0_LUT_REQID GENMASK(31, 16)
+#define IMX95_PE0_LUT_MASK GENMASK(15, 0)
+
+#define IMX95_SID_MASK GENMASK(5, 0)
+#define IMX95_MAX_LUT 32
+
+#define IMX95_PCIE_RST_CTRL 0x3010
+#define IMX95_PCIE_COLD_RST BIT(0)
+
+#define to_imx_pcie(x) dev_get_drvdata((x)->dev)
+
+enum imx_pcie_variants {
IMX6Q,
IMX6SX,
IMX6QP,
@@ -52,34 +89,71 @@ enum imx6_pcie_variants {
IMX8MQ,
IMX8MM,
IMX8MP,
+ IMX8Q,
+ IMX95,
+ IMX8MQ_EP,
+ IMX8MM_EP,
+ IMX8MP_EP,
+ IMX8Q_EP,
+ IMX95_EP,
};
-#define IMX6_PCIE_FLAG_IMX6_PHY BIT(0)
-#define IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE BIT(1)
-#define IMX6_PCIE_FLAG_SUPPORTS_SUSPEND BIT(2)
+#define IMX_PCIE_FLAG_IMX_PHY BIT(0)
+#define IMX_PCIE_FLAG_SPEED_CHANGE_WORKAROUND BIT(1)
+#define IMX_PCIE_FLAG_SUPPORTS_SUSPEND BIT(2)
+#define IMX_PCIE_FLAG_HAS_PHYDRV BIT(3)
+#define IMX_PCIE_FLAG_HAS_APP_RESET BIT(4)
+#define IMX_PCIE_FLAG_HAS_PHY_RESET BIT(5)
+#define IMX_PCIE_FLAG_HAS_SERDES BIT(6)
+#define IMX_PCIE_FLAG_SUPPORT_64BIT BIT(7)
+#define IMX_PCIE_FLAG_CPU_ADDR_FIXUP BIT(8)
+/*
+ * Because of ERR005723 (PCIe does not support L2 power down), we need to
+ * work around suspend/resume on the devices affected by this erratum.
+ */
+#define IMX_PCIE_FLAG_BROKEN_SUSPEND BIT(9)
+#define IMX_PCIE_FLAG_HAS_LUT BIT(10)
+#define IMX_PCIE_FLAG_8GT_ECN_ERR051586 BIT(11)
+
+#define imx_check_flag(pci, val) (pci->drvdata->flags & val)
+
+#define IMX_PCIE_MAX_INSTANCES 2
-struct imx6_pcie_drvdata {
- enum imx6_pcie_variants variant;
+struct imx_pcie;
+
+struct imx_pcie_drvdata {
+ enum imx_pcie_variants variant;
+ enum dw_pcie_device_mode mode;
u32 flags;
int dbi_length;
const char *gpr;
+ const u32 ltssm_off;
+ const u32 ltssm_mask;
+ const u32 mode_off[IMX_PCIE_MAX_INSTANCES];
+ const u32 mode_mask[IMX_PCIE_MAX_INSTANCES];
+ const struct pci_epc_features *epc_features;
+ int (*init_phy)(struct imx_pcie *pcie);
+ int (*enable_ref_clk)(struct imx_pcie *pcie, bool enable);
+ int (*core_reset)(struct imx_pcie *pcie, bool assert);
+ int (*wait_pll_lock)(struct imx_pcie *pcie);
+ const struct dw_pcie_host_ops *ops;
+};
+
+struct imx_lut_data {
+ u32 data1;
+ u32 data2;
};
-struct imx6_pcie {
+struct imx_pcie {
struct dw_pcie *pci;
- int reset_gpio;
- bool gpio_active_high;
- bool link_is_up;
- struct clk *pcie_bus;
- struct clk *pcie_phy;
- struct clk *pcie_inbound_axi;
- struct clk *pcie;
- struct clk *pcie_aux;
+ struct gpio_desc *reset_gpiod;
+ struct clk_bulk_data *clks;
+ int num_clks;
struct regmap *iomuxc_gpr;
+ u16 msi_ctrl;
u32 controller_id;
struct reset_control *pciephy_reset;
struct reset_control *apps_reset;
- struct reset_control *turnoff_reset;
u32 tx_deemph_gen1;
u32 tx_deemph_gen2_3p5db;
u32 tx_deemph_gen2_6db;
@@ -89,12 +163,17 @@ struct imx6_pcie {
struct regulator *vph;
void __iomem *phy_base;
+ /* LUT data for pcie */
+ struct imx_lut_data luts[IMX95_MAX_LUT];
/* power domain for pcie */
struct device *pd_pcie;
/* power domain for pcie phy */
struct device *pd_pcie_phy;
struct phy *phy;
- const struct imx6_pcie_drvdata *drvdata;
+ const struct imx_pcie_drvdata *drvdata;
+
+ /* Ensure that only one device's LUT is configured at any given time */
+ struct mutex lock;
};
/* Parameters for the waiting for PCIe PHY PLL to lock on i.MX7 */
@@ -149,35 +228,77 @@ struct imx6_pcie {
#define PHY_RX_OVRD_IN_LO_RX_DATA_EN BIT(5)
#define PHY_RX_OVRD_IN_LO_RX_PLL_EN BIT(3)
-static unsigned int imx6_pcie_grp_offset(const struct imx6_pcie *imx6_pcie)
+static unsigned int imx_pcie_grp_offset(const struct imx_pcie *imx_pcie)
{
- WARN_ON(imx6_pcie->drvdata->variant != IMX8MQ &&
- imx6_pcie->drvdata->variant != IMX8MM &&
- imx6_pcie->drvdata->variant != IMX8MP);
- return imx6_pcie->controller_id == 1 ? IOMUXC_GPR16 : IOMUXC_GPR14;
+ WARN_ON(imx_pcie->drvdata->variant != IMX8MQ &&
+ imx_pcie->drvdata->variant != IMX8MQ_EP &&
+ imx_pcie->drvdata->variant != IMX8MM &&
+ imx_pcie->drvdata->variant != IMX8MM_EP &&
+ imx_pcie->drvdata->variant != IMX8MP &&
+ imx_pcie->drvdata->variant != IMX8MP_EP);
+ return imx_pcie->controller_id == 1 ? IOMUXC_GPR16 : IOMUXC_GPR14;
}
-static void imx6_pcie_configure_type(struct imx6_pcie *imx6_pcie)
+static int imx95_pcie_init_phy(struct imx_pcie *imx_pcie)
{
- unsigned int mask, val;
+ /*
+ * ERR051624: The Controller Without Vaux Cannot Exit L23 Ready
+ * Through Beacon or PERST# De-assertion
+ *
+ * When the auxiliary power is not available, the controller
+ * cannot exit from L23 Ready with beacon or PERST# de-assertion
+ * when main power is not removed.
+ *
+ * Workaround: Set SS_RW_REG_1[SYS_AUX_PWR_DET] to 1.
+ */
+ regmap_set_bits(imx_pcie->iomuxc_gpr, IMX95_PCIE_SS_RW_REG_1,
+ IMX95_PCIE_SYS_AUX_PWR_DET);
+
+ regmap_update_bits(imx_pcie->iomuxc_gpr,
+ IMX95_PCIE_SS_RW_REG_0,
+ IMX95_PCIE_PHY_CR_PARA_SEL,
+ IMX95_PCIE_PHY_CR_PARA_SEL);
+
+ regmap_update_bits(imx_pcie->iomuxc_gpr,
+ IMX95_PCIE_PHY_GEN_CTRL,
+ IMX95_PCIE_REF_USE_PAD, 0);
+ regmap_update_bits(imx_pcie->iomuxc_gpr,
+ IMX95_PCIE_SS_RW_REG_0,
+ IMX95_PCIE_REF_CLKEN,
+ IMX95_PCIE_REF_CLKEN);
- if (imx6_pcie->drvdata->variant == IMX8MQ &&
- imx6_pcie->controller_id == 1) {
- mask = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE;
- val = FIELD_PREP(IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE,
- PCI_EXP_TYPE_ROOT_PORT);
- } else {
- mask = IMX6Q_GPR12_DEVICE_TYPE;
- val = FIELD_PREP(IMX6Q_GPR12_DEVICE_TYPE,
- PCI_EXP_TYPE_ROOT_PORT);
- }
+ return 0;
+}
+
+static void imx_pcie_configure_type(struct imx_pcie *imx_pcie)
+{
+ const struct imx_pcie_drvdata *drvdata = imx_pcie->drvdata;
+ unsigned int mask, val, mode, id;
+
+ if (drvdata->mode == DW_PCIE_EP_TYPE)
+ mode = PCI_EXP_TYPE_ENDPOINT;
+ else
+ mode = PCI_EXP_TYPE_ROOT_PORT;
+
+ id = imx_pcie->controller_id;
+
+ /* If mode_mask is 0, generic PHY driver is used to set the mode */
+ if (!drvdata->mode_mask[0])
+ return;
+
+ /* If mode_mask[id] is 0, each controller has its individual GPR */
+ if (!drvdata->mode_mask[id])
+ id = 0;
+
+ mask = drvdata->mode_mask[id];
+ val = mode << (ffs(mask) - 1);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, mask, val);
+ regmap_update_bits(imx_pcie->iomuxc_gpr, drvdata->mode_off[id], mask, val);
}
-static int pcie_phy_poll_ack(struct imx6_pcie *imx6_pcie, bool exp_val)
+static int pcie_phy_poll_ack(struct imx_pcie *imx_pcie, bool exp_val)
{
- struct dw_pcie *pci = imx6_pcie->pci;
+ struct dw_pcie *pci = imx_pcie->pci;
bool val;
u32 max_iterations = 10;
u32 wait_counter = 0;
@@ -196,9 +317,9 @@ static int pcie_phy_poll_ack(struct imx6_pcie *imx6_pcie, bool exp_val)
return -ETIMEDOUT;
}
-static int pcie_phy_wait_ack(struct imx6_pcie *imx6_pcie, int addr)
+static int pcie_phy_wait_ack(struct imx_pcie *imx_pcie, int addr)
{
- struct dw_pcie *pci = imx6_pcie->pci;
+ struct dw_pcie *pci = imx_pcie->pci;
u32 val;
int ret;
@@ -208,24 +329,24 @@ static int pcie_phy_wait_ack(struct imx6_pcie *imx6_pcie, int addr)
val |= PCIE_PHY_CTRL_CAP_ADR;
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);
- ret = pcie_phy_poll_ack(imx6_pcie, true);
+ ret = pcie_phy_poll_ack(imx_pcie, true);
if (ret)
return ret;
val = PCIE_PHY_CTRL_DATA(addr);
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);
- return pcie_phy_poll_ack(imx6_pcie, false);
+ return pcie_phy_poll_ack(imx_pcie, false);
}
/* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */
-static int pcie_phy_read(struct imx6_pcie *imx6_pcie, int addr, u16 *data)
+static int pcie_phy_read(struct imx_pcie *imx_pcie, int addr, u16 *data)
{
- struct dw_pcie *pci = imx6_pcie->pci;
+ struct dw_pcie *pci = imx_pcie->pci;
u32 phy_ctl;
int ret;
- ret = pcie_phy_wait_ack(imx6_pcie, addr);
+ ret = pcie_phy_wait_ack(imx_pcie, addr);
if (ret)
return ret;
@@ -233,7 +354,7 @@ static int pcie_phy_read(struct imx6_pcie *imx6_pcie, int addr, u16 *data)
phy_ctl = PCIE_PHY_CTRL_RD;
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, phy_ctl);
- ret = pcie_phy_poll_ack(imx6_pcie, true);
+ ret = pcie_phy_poll_ack(imx_pcie, true);
if (ret)
return ret;
@@ -242,18 +363,18 @@ static int pcie_phy_read(struct imx6_pcie *imx6_pcie, int addr, u16 *data)
/* deassert Read signal */
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x00);
- return pcie_phy_poll_ack(imx6_pcie, false);
+ return pcie_phy_poll_ack(imx_pcie, false);
}
-static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, u16 data)
+static int pcie_phy_write(struct imx_pcie *imx_pcie, int addr, u16 data)
{
- struct dw_pcie *pci = imx6_pcie->pci;
+ struct dw_pcie *pci = imx_pcie->pci;
u32 var;
int ret;
/* write addr */
/* cap addr */
- ret = pcie_phy_wait_ack(imx6_pcie, addr);
+ ret = pcie_phy_wait_ack(imx_pcie, addr);
if (ret)
return ret;
@@ -264,7 +385,7 @@ static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, u16 data)
var |= PCIE_PHY_CTRL_CAP_DAT;
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
- ret = pcie_phy_poll_ack(imx6_pcie, true);
+ ret = pcie_phy_poll_ack(imx_pcie, true);
if (ret)
return ret;
@@ -273,7 +394,7 @@ static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, u16 data)
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
/* wait for ack de-assertion */
- ret = pcie_phy_poll_ack(imx6_pcie, false);
+ ret = pcie_phy_poll_ack(imx_pcie, false);
if (ret)
return ret;
@@ -282,7 +403,7 @@ static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, u16 data)
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
/* wait for ack */
- ret = pcie_phy_poll_ack(imx6_pcie, true);
+ ret = pcie_phy_poll_ack(imx_pcie, true);
if (ret)
return ret;
@@ -291,7 +412,7 @@ static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, u16 data)
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
/* wait for ack de-assertion */
- ret = pcie_phy_poll_ack(imx6_pcie, false);
+ ret = pcie_phy_poll_ack(imx_pcie, false);
if (ret)
return ret;
@@ -300,81 +421,68 @@ static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, u16 data)
return 0;
}
-static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
+static int imx8mq_pcie_init_phy(struct imx_pcie *imx_pcie)
{
- switch (imx6_pcie->drvdata->variant) {
- case IMX8MM:
- case IMX8MP:
- /*
- * The PHY initialization had been done in the PHY
- * driver, break here directly.
- */
- break;
- case IMX8MQ:
- /*
- * TODO: Currently this code assumes external
- * oscillator is being used
- */
- regmap_update_bits(imx6_pcie->iomuxc_gpr,
- imx6_pcie_grp_offset(imx6_pcie),
- IMX8MQ_GPR_PCIE_REF_USE_PAD,
- IMX8MQ_GPR_PCIE_REF_USE_PAD);
- /*
- * Regarding the datasheet, the PCIE_VPH is suggested
- * to be 1.8V. If the PCIE_VPH is supplied by 3.3V, the
- * VREG_BYPASS should be cleared to zero.
- */
- if (imx6_pcie->vph &&
- regulator_get_voltage(imx6_pcie->vph) > 3000000)
- regmap_update_bits(imx6_pcie->iomuxc_gpr,
- imx6_pcie_grp_offset(imx6_pcie),
- IMX8MQ_GPR_PCIE_VREG_BYPASS,
- 0);
- break;
- case IMX7D:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0);
- break;
- case IMX6SX:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6SX_GPR12_PCIE_RX_EQ_MASK,
- IMX6SX_GPR12_PCIE_RX_EQ_2);
- fallthrough;
- default:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ /* TODO: This code assumes external oscillator is being used */
+ regmap_update_bits(imx_pcie->iomuxc_gpr,
+ imx_pcie_grp_offset(imx_pcie),
+ IMX8MQ_GPR_PCIE_REF_USE_PAD,
+ IMX8MQ_GPR_PCIE_REF_USE_PAD);
+ /*
+ * Per the datasheet, the PCIE_VPH is suggested to be 1.8V. If the
+ * PCIE_VPH is supplied by 3.3V, the VREG_BYPASS should be cleared
+ * to zero.
+ */
+ if (imx_pcie->vph && regulator_get_voltage(imx_pcie->vph) > 3000000)
+ regmap_update_bits(imx_pcie->iomuxc_gpr,
+ imx_pcie_grp_offset(imx_pcie),
+ IMX8MQ_GPR_PCIE_VREG_BYPASS,
+ 0);
+
+ return 0;
+}
+
+static int imx_pcie_init_phy(struct imx_pcie *imx_pcie)
+{
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
- /* configure constant input signal to the pcie ctrl and phy */
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6Q_GPR12_LOS_LEVEL, 9 << 4);
-
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
- IMX6Q_GPR8_TX_DEEMPH_GEN1,
- imx6_pcie->tx_deemph_gen1 << 0);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
- IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB,
- imx6_pcie->tx_deemph_gen2_3p5db << 6);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
- IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB,
- imx6_pcie->tx_deemph_gen2_6db << 12);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
- IMX6Q_GPR8_TX_SWING_FULL,
- imx6_pcie->tx_swing_full << 18);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
- IMX6Q_GPR8_TX_SWING_LOW,
- imx6_pcie->tx_swing_low << 25);
- break;
- }
+ /* configure constant input signal to the pcie ctrl and phy */
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6Q_GPR12_LOS_LEVEL, 9 << 4);
+
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_DEEMPH_GEN1,
+ imx_pcie->tx_deemph_gen1 << 0);
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB,
+ imx_pcie->tx_deemph_gen2_3p5db << 6);
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB,
+ imx_pcie->tx_deemph_gen2_6db << 12);
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_SWING_FULL,
+ imx_pcie->tx_swing_full << 18);
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_SWING_LOW,
+ imx_pcie->tx_swing_low << 25);
+ return 0;
+}
- imx6_pcie_configure_type(imx6_pcie);
+static int imx6sx_pcie_init_phy(struct imx_pcie *imx_pcie)
+{
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6SX_GPR12_PCIE_RX_EQ_MASK, IMX6SX_GPR12_PCIE_RX_EQ_2);
+
+ return imx_pcie_init_phy(imx_pcie);
}
-static void imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie)
+static void imx7d_pcie_wait_for_phy_pll_lock(struct imx_pcie *imx_pcie)
{
u32 val;
- struct device *dev = imx6_pcie->pci->dev;
+ struct device *dev = imx_pcie->pci->dev;
- if (regmap_read_poll_timeout(imx6_pcie->iomuxc_gpr,
+ if (regmap_read_poll_timeout(imx_pcie->iomuxc_gpr,
IOMUXC_GPR22, val,
val & IMX7D_GPR22_PCIE_PHY_PLL_LOCKED,
PHY_PLL_LOCK_WAIT_USLEEP_MAX,
@@ -382,15 +490,38 @@ static void imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie)
dev_err(dev, "PCIe PLL lock timeout\n");
}
-static int imx6_setup_phy_mpll(struct imx6_pcie *imx6_pcie)
+static int imx95_pcie_wait_for_phy_pll_lock(struct imx_pcie *imx_pcie)
{
- unsigned long phy_rate = clk_get_rate(imx6_pcie->pcie_phy);
+ u32 val;
+ struct device *dev = imx_pcie->pci->dev;
+
+ if (regmap_read_poll_timeout(imx_pcie->iomuxc_gpr,
+ IMX95_PCIE_PHY_MPLLA_CTRL, val,
+ val & IMX95_PCIE_PHY_MPLL_STATE,
+ PHY_PLL_LOCK_WAIT_USLEEP_MAX,
+ PHY_PLL_LOCK_WAIT_TIMEOUT)) {
+ dev_err(dev, "PCIe PLL lock timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int imx_setup_phy_mpll(struct imx_pcie *imx_pcie)
+{
+ unsigned long phy_rate = 0;
int mult, div;
u16 val;
+ int i;
+ struct clk_bulk_data *clks = imx_pcie->clks;
- if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_PHY))
+ if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_IMX_PHY))
return 0;
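+	/*
+	 * The PHY reference clock no longer has a dedicated struct member;
+	 * look it up by its "pcie_phy" ID in the clk_bulk array to query
+	 * its rate.
+	 */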
+ for (i = 0; i < imx_pcie->num_clks; i++)
+ if (strncmp(clks[i].id, "pcie_phy", 8) == 0)
+ phy_rate = clk_get_rate(clks[i].clk);
+
switch (phy_rate) {
case 125000000:
/*
@@ -407,46 +538,46 @@ static int imx6_setup_phy_mpll(struct imx6_pcie *imx6_pcie)
div = 1;
break;
default:
- dev_err(imx6_pcie->pci->dev,
+ dev_err(imx_pcie->pci->dev,
"Unsupported PHY reference clock rate %lu\n", phy_rate);
return -EINVAL;
}
- pcie_phy_read(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, &val);
+ pcie_phy_read(imx_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, &val);
val &= ~(PCIE_PHY_MPLL_MULTIPLIER_MASK <<
PCIE_PHY_MPLL_MULTIPLIER_SHIFT);
val |= mult << PCIE_PHY_MPLL_MULTIPLIER_SHIFT;
val |= PCIE_PHY_MPLL_MULTIPLIER_OVRD;
- pcie_phy_write(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, val);
+ pcie_phy_write(imx_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, val);
- pcie_phy_read(imx6_pcie, PCIE_PHY_ATEOVRD, &val);
+ pcie_phy_read(imx_pcie, PCIE_PHY_ATEOVRD, &val);
val &= ~(PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK <<
PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT);
val |= div << PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT;
val |= PCIE_PHY_ATEOVRD_EN;
- pcie_phy_write(imx6_pcie, PCIE_PHY_ATEOVRD, val);
+ pcie_phy_write(imx_pcie, PCIE_PHY_ATEOVRD, val);
return 0;
}
-static void imx6_pcie_reset_phy(struct imx6_pcie *imx6_pcie)
+static void imx_pcie_reset_phy(struct imx_pcie *imx_pcie)
{
u16 tmp;
- if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_PHY))
+ if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_IMX_PHY))
return;
- pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp);
+ pcie_phy_read(imx_pcie, PHY_RX_OVRD_IN_LO, &tmp);
tmp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN |
PHY_RX_OVRD_IN_LO_RX_PLL_EN);
- pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp);
+ pcie_phy_write(imx_pcie, PHY_RX_OVRD_IN_LO, tmp);
usleep_range(2000, 3000);
- pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp);
+ pcie_phy_read(imx_pcie, PHY_RX_OVRD_IN_LO, &tmp);
tmp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN |
PHY_RX_OVRD_IN_LO_RX_PLL_EN);
- pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp);
+ pcie_phy_write(imx_pcie, PHY_RX_OVRD_IN_LO, tmp);
}
#ifdef CONFIG_ARM
@@ -485,164 +616,112 @@ static int imx6q_pcie_abort_handler(unsigned long addr,
}
#endif
-static int imx6_pcie_attach_pd(struct device *dev)
+static int imx_pcie_attach_pd(struct device *dev)
{
- struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
+ struct imx_pcie *imx_pcie = dev_get_drvdata(dev);
struct device_link *link;
/* Do nothing when in a single power domain */
if (dev->pm_domain)
return 0;
- imx6_pcie->pd_pcie = dev_pm_domain_attach_by_name(dev, "pcie");
- if (IS_ERR(imx6_pcie->pd_pcie))
- return PTR_ERR(imx6_pcie->pd_pcie);
+ imx_pcie->pd_pcie = dev_pm_domain_attach_by_name(dev, "pcie");
+ if (IS_ERR(imx_pcie->pd_pcie))
+ return PTR_ERR(imx_pcie->pd_pcie);
/* Do nothing when power domain missing */
- if (!imx6_pcie->pd_pcie)
+ if (!imx_pcie->pd_pcie)
return 0;
- link = device_link_add(dev, imx6_pcie->pd_pcie,
+ link = device_link_add(dev, imx_pcie->pd_pcie,
DL_FLAG_STATELESS |
DL_FLAG_PM_RUNTIME |
DL_FLAG_RPM_ACTIVE);
if (!link) {
- dev_err(dev, "Failed to add device_link to pcie pd.\n");
+ dev_err(dev, "Failed to add device_link to pcie pd\n");
return -EINVAL;
}
- imx6_pcie->pd_pcie_phy = dev_pm_domain_attach_by_name(dev, "pcie_phy");
- if (IS_ERR(imx6_pcie->pd_pcie_phy))
- return PTR_ERR(imx6_pcie->pd_pcie_phy);
+ imx_pcie->pd_pcie_phy = dev_pm_domain_attach_by_name(dev, "pcie_phy");
+ if (IS_ERR(imx_pcie->pd_pcie_phy))
+ return PTR_ERR(imx_pcie->pd_pcie_phy);
- link = device_link_add(dev, imx6_pcie->pd_pcie_phy,
+ link = device_link_add(dev, imx_pcie->pd_pcie_phy,
DL_FLAG_STATELESS |
DL_FLAG_PM_RUNTIME |
DL_FLAG_RPM_ACTIVE);
if (!link) {
- dev_err(dev, "Failed to add device_link to pcie_phy pd.\n");
+ dev_err(dev, "Failed to add device_link to pcie_phy pd\n");
return -EINVAL;
}
return 0;
}
-static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
+static int imx6sx_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable)
{
- struct dw_pcie *pci = imx6_pcie->pci;
- struct device *dev = pci->dev;
- unsigned int offset;
- int ret = 0;
-
- switch (imx6_pcie->drvdata->variant) {
- case IMX6SX:
- ret = clk_prepare_enable(imx6_pcie->pcie_inbound_axi);
- if (ret) {
- dev_err(dev, "unable to enable pcie_axi clock\n");
- break;
- }
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6SX_GPR12_PCIE_TEST_POWERDOWN,
+ enable ? 0 : IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
+ return 0;
+}
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6SX_GPR12_PCIE_TEST_POWERDOWN, 0);
- break;
- case IMX6QP:
- case IMX6Q:
+static int imx6q_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable)
+{
+ if (enable) {
/* power up core phy and enable ref clock */
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);
+ regmap_clear_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_TEST_PD);
/*
- * the async reset input need ref clock to sync internally,
+	 * The async reset input needs the ref clock to sync internally,
* when the ref clock comes after reset, internal synced
* reset time is too short, cannot meet the requirement.
- * add one ~10us delay here.
+ * Add a ~10us delay here.
*/
usleep_range(10, 100);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
- break;
- case IMX7D:
- break;
- case IMX8MM:
- case IMX8MQ:
- case IMX8MP:
- ret = clk_prepare_enable(imx6_pcie->pcie_aux);
- if (ret) {
- dev_err(dev, "unable to enable pcie_aux clock\n");
- break;
- }
-
- offset = imx6_pcie_grp_offset(imx6_pcie);
- /*
- * Set the over ride low and enabled
- * make sure that REF_CLK is turned on.
- */
- regmap_update_bits(imx6_pcie->iomuxc_gpr, offset,
- IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE,
- 0);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, offset,
- IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN,
- IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN);
- break;
+ regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_REF_CLK_EN);
+ } else {
+ regmap_clear_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_REF_CLK_EN);
+ regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_TEST_PD);
}
- return ret;
+ return 0;
}
-static void imx6_pcie_disable_ref_clk(struct imx6_pcie *imx6_pcie)
+static int imx8mm_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable)
{
- switch (imx6_pcie->drvdata->variant) {
- case IMX6SX:
- clk_disable_unprepare(imx6_pcie->pcie_inbound_axi);
- break;
- case IMX6QP:
- case IMX6Q:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_REF_CLK_EN, 0);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_TEST_PD,
- IMX6Q_GPR1_PCIE_TEST_PD);
- break;
- case IMX7D:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX7D_GPR12_PCIE_PHY_REFCLK_SEL,
- IMX7D_GPR12_PCIE_PHY_REFCLK_SEL);
- break;
- case IMX8MM:
- case IMX8MQ:
- case IMX8MP:
- clk_disable_unprepare(imx6_pcie->pcie_aux);
- break;
- default:
- break;
- }
+ int offset = imx_pcie_grp_offset(imx_pcie);
+
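+	/*
+	 * Set the CLK_REQ override low and enable it so that REF_CLK is
+	 * kept running; on disable, release the override again.
+	 */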
+ regmap_update_bits(imx_pcie->iomuxc_gpr, offset,
+ IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE,
+ enable ? 0 : IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE);
+ regmap_update_bits(imx_pcie->iomuxc_gpr, offset,
+ IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN,
+ enable ? IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN : 0);
+ return 0;
}
-static int imx6_pcie_clk_enable(struct imx6_pcie *imx6_pcie)
+static int imx7d_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable)
{
- struct dw_pcie *pci = imx6_pcie->pci;
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX7D_GPR12_PCIE_PHY_REFCLK_SEL,
+ enable ? 0 : IMX7D_GPR12_PCIE_PHY_REFCLK_SEL);
+ return 0;
+}
+
+static int imx_pcie_clk_enable(struct imx_pcie *imx_pcie)
+{
+ struct dw_pcie *pci = imx_pcie->pci;
struct device *dev = pci->dev;
int ret;
- ret = clk_prepare_enable(imx6_pcie->pcie_phy);
- if (ret) {
- dev_err(dev, "unable to enable pcie_phy clock\n");
+ ret = clk_bulk_prepare_enable(imx_pcie->num_clks, imx_pcie->clks);
+ if (ret)
return ret;
- }
-
- ret = clk_prepare_enable(imx6_pcie->pcie_bus);
- if (ret) {
- dev_err(dev, "unable to enable pcie_bus clock\n");
- goto err_pcie_bus;
- }
-
- ret = clk_prepare_enable(imx6_pcie->pcie);
- if (ret) {
- dev_err(dev, "unable to enable pcie clock\n");
- goto err_pcie;
- }
- ret = imx6_pcie_enable_ref_clk(imx6_pcie);
- if (ret) {
- dev_err(dev, "unable to enable pcie ref clock\n");
- goto err_ref_clk;
+ if (imx_pcie->drvdata->enable_ref_clk) {
+ ret = imx_pcie->drvdata->enable_ref_clk(imx_pcie, true);
+ if (ret) {
+ dev_err(dev, "Failed to enable PCIe REFCLK\n");
+ goto err_ref_clk;
+ }
}
/* allow the clocks to stabilize */
@@ -650,116 +729,156 @@ static int imx6_pcie_clk_enable(struct imx6_pcie *imx6_pcie)
return 0;
err_ref_clk:
- clk_disable_unprepare(imx6_pcie->pcie);
-err_pcie:
- clk_disable_unprepare(imx6_pcie->pcie_bus);
-err_pcie_bus:
- clk_disable_unprepare(imx6_pcie->pcie_phy);
+ clk_bulk_disable_unprepare(imx_pcie->num_clks, imx_pcie->clks);
return ret;
}
-static void imx6_pcie_clk_disable(struct imx6_pcie *imx6_pcie)
+static void imx_pcie_clk_disable(struct imx_pcie *imx_pcie)
{
- imx6_pcie_disable_ref_clk(imx6_pcie);
- clk_disable_unprepare(imx6_pcie->pcie);
- clk_disable_unprepare(imx6_pcie->pcie_bus);
- clk_disable_unprepare(imx6_pcie->pcie_phy);
+ if (imx_pcie->drvdata->enable_ref_clk)
+ imx_pcie->drvdata->enable_ref_clk(imx_pcie, false);
+ clk_bulk_disable_unprepare(imx_pcie->num_clks, imx_pcie->clks);
}
-static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
+static int imx6sx_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert)
{
- switch (imx6_pcie->drvdata->variant) {
- case IMX7D:
- case IMX8MQ:
- reset_control_assert(imx6_pcie->pciephy_reset);
- fallthrough;
- case IMX8MM:
- case IMX8MP:
- reset_control_assert(imx6_pcie->apps_reset);
- break;
- case IMX6SX:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6SX_GPR12_PCIE_TEST_POWERDOWN,
- IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
- /* Force PCIe PHY reset */
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
- IMX6SX_GPR5_PCIE_BTNRST_RESET,
- IMX6SX_GPR5_PCIE_BTNRST_RESET);
- break;
- case IMX6QP:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_SW_RST,
- IMX6Q_GPR1_PCIE_SW_RST);
- break;
- case IMX6Q:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
- break;
- }
+ if (assert)
+ regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
- /* Some boards don't have PCIe reset GPIO. */
- if (gpio_is_valid(imx6_pcie->reset_gpio))
- gpio_set_value_cansleep(imx6_pcie->reset_gpio,
- imx6_pcie->gpio_active_high);
+ /* Force PCIe PHY reset */
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR5, IMX6SX_GPR5_PCIE_BTNRST_RESET,
+ assert ? IMX6SX_GPR5_PCIE_BTNRST_RESET : 0);
+ return 0;
+}
+
+static int imx6qp_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert)
+{
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_SW_RST,
+ assert ? IMX6Q_GPR1_PCIE_SW_RST : 0);
+ if (!assert)
+ usleep_range(200, 500);
+
+ return 0;
+}
+
+static int imx6q_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert)
+{
+ if (!assert)
+ return 0;
+
+ regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_TEST_PD);
+ regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_REF_CLK_EN);
+
+ return 0;
}
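/*
 * Illustrative note (not part of the patch): regmap_set_bits(map, reg, b)
 * is shorthand for regmap_update_bits(map, reg, b, b), and
 * regmap_clear_bits(map, reg, b) for regmap_update_bits(map, reg, b, 0),
 * so the conversions in the reset helpers above preserve behaviour.
 */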
-static int imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
+static int imx7d_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert)
{
- struct dw_pcie *pci = imx6_pcie->pci;
+ struct dw_pcie *pci = imx_pcie->pci;
struct device *dev = pci->dev;
- switch (imx6_pcie->drvdata->variant) {
- case IMX8MQ:
- reset_control_deassert(imx6_pcie->pciephy_reset);
- break;
- case IMX7D:
- reset_control_deassert(imx6_pcie->pciephy_reset);
+ if (assert)
+ return 0;
- /* Workaround for ERR010728, failure of PCI-e PLL VCO to
- * oscillate, especially when cold. This turns off "Duty-cycle
- * Corrector" and other mysterious undocumented things.
- */
- if (likely(imx6_pcie->phy_base)) {
- /* De-assert DCC_FB_EN */
- writel(PCIE_PHY_CMN_REG4_DCC_FB_EN,
- imx6_pcie->phy_base + PCIE_PHY_CMN_REG4);
- /* Assert RX_EQS and RX_EQS_SEL */
- writel(PCIE_PHY_CMN_REG24_RX_EQ_SEL
- | PCIE_PHY_CMN_REG24_RX_EQ,
- imx6_pcie->phy_base + PCIE_PHY_CMN_REG24);
- /* Assert ATT_MODE */
- writel(PCIE_PHY_CMN_REG26_ATT_MODE,
- imx6_pcie->phy_base + PCIE_PHY_CMN_REG26);
- } else {
- dev_warn(dev, "Unable to apply ERR010728 workaround. DT missing fsl,imx7d-pcie-phy phandle ?\n");
- }
+ /*
+ * Workaround for ERR010728 (IMX7DS_2N09P, Rev. 1.1, 4/2023):
+ *
+ * PCIe: PLL may fail to lock under corner conditions.
+ *
+	 * Initial VCO oscillation may fail under corner conditions such as
+	 * cold temperature, which will cause the PCIe PLL to fail to lock
+	 * in the initialization phase.
+ *
+ * The Duty-cycle Corrector calibration must be disabled.
+ *
+ * 1. De-assert the G_RST signal by clearing
+ * SRC_PCIEPHY_RCR[PCIEPHY_G_RST].
+	 * 2. De-assert DCC_FB_EN by writing data "0x29" to the register
+	 *    address 0x306d0014 (PCIE_PHY_CMN_REG4).
+	 * 3. Assert RX_EQS, RX_EQ_SEL by writing data "0x48" to the register
+	 *    address 0x306d0090 (PCIE_PHY_CMN_REG24).
+	 * 4. Assert ATT_MODE by writing data "0xbc" to the register
+	 *    address 0x306d0098 (PCIE_PHY_CMN_REG26).
+ * 5. De-assert the CMN_RST signal by clearing register bit
+ * SRC_PCIEPHY_RCR[PCIEPHY_BTN]
+ */
- imx7d_pcie_wait_for_phy_pll_lock(imx6_pcie);
- break;
- case IMX6SX:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
- IMX6SX_GPR5_PCIE_BTNRST_RESET, 0);
- break;
- case IMX6QP:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_SW_RST, 0);
+ if (likely(imx_pcie->phy_base)) {
+ /* De-assert DCC_FB_EN */
+ writel(PCIE_PHY_CMN_REG4_DCC_FB_EN, imx_pcie->phy_base + PCIE_PHY_CMN_REG4);
+ /* Assert RX_EQS and RX_EQS_SEL */
+ writel(PCIE_PHY_CMN_REG24_RX_EQ_SEL | PCIE_PHY_CMN_REG24_RX_EQ,
+ imx_pcie->phy_base + PCIE_PHY_CMN_REG24);
+ /* Assert ATT_MODE */
+ writel(PCIE_PHY_CMN_REG26_ATT_MODE, imx_pcie->phy_base + PCIE_PHY_CMN_REG26);
+ } else {
+ dev_warn(dev, "Unable to apply ERR010728 workaround. DT missing fsl,imx7d-pcie-phy phandle ?\n");
+ }
+ imx7d_pcie_wait_for_phy_pll_lock(imx_pcie);
+ return 0;
+}
- usleep_range(200, 500);
- break;
- case IMX6Q: /* Nothing to do */
- case IMX8MM:
- case IMX8MP:
- break;
+static int imx95_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert)
+{
+ u32 val;
+
+ if (assert) {
+ /*
+		 * From the i.MX95 PCIe PHY's perspective, the COLD reset
+		 * toggle should be completed after power-up per this sequence:
+ * > 10us(at power-up)
+ * > 10ns(warm reset)
+ * |<------------>|
+ * ______________
+ * phy_reset ____/ \________________
+ * ____________
+ * ref_clk_en_______________________/
+ * Toggle COLD reset aligned with this sequence for i.MX95 PCIe.
+ */
+ regmap_set_bits(imx_pcie->iomuxc_gpr, IMX95_PCIE_RST_CTRL,
+ IMX95_PCIE_COLD_RST);
+ /*
+ * Make sure the write to IMX95_PCIE_RST_CTRL is flushed to the
+ * hardware by doing a read. Otherwise, there is no guarantee
+ * that the write has reached the hardware before udelay().
+ */
+ regmap_read_bypassed(imx_pcie->iomuxc_gpr, IMX95_PCIE_RST_CTRL,
+ &val);
+ udelay(15);
+ regmap_clear_bits(imx_pcie->iomuxc_gpr, IMX95_PCIE_RST_CTRL,
+ IMX95_PCIE_COLD_RST);
+ regmap_read_bypassed(imx_pcie->iomuxc_gpr, IMX95_PCIE_RST_CTRL,
+ &val);
+ udelay(10);
}
+ return 0;
+}
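/*
 * Illustrative sketch (not part of the patch): the flush-before-delay
 * pattern used above. regmap_read_bypassed() forces a bus read even on a
 * cached regmap, so the write is known to have reached the device before
 * the delay starts counting. The EXAMPLE_* names are hypothetical.
 */
static void example_pulse_reset(struct regmap *map)
{
	unsigned int val;

	regmap_set_bits(map, EXAMPLE_RST_CTRL, EXAMPLE_RST_BIT);
	regmap_read_bypassed(map, EXAMPLE_RST_CTRL, &val);	/* flush */
	udelay(15);						/* hold time */
	regmap_clear_bits(map, EXAMPLE_RST_CTRL, EXAMPLE_RST_BIT);
}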
+
+static void imx_pcie_assert_core_reset(struct imx_pcie *imx_pcie)
+{
+ reset_control_assert(imx_pcie->pciephy_reset);
+
+ if (imx_pcie->drvdata->core_reset)
+ imx_pcie->drvdata->core_reset(imx_pcie, true);
+
/* Some boards don't have PCIe reset GPIO. */
- if (gpio_is_valid(imx6_pcie->reset_gpio)) {
+ gpiod_set_value_cansleep(imx_pcie->reset_gpiod, 1);
+}
+
+static int imx_pcie_deassert_core_reset(struct imx_pcie *imx_pcie)
+{
+ reset_control_deassert(imx_pcie->pciephy_reset);
+
+ if (imx_pcie->drvdata->core_reset)
+ imx_pcie->drvdata->core_reset(imx_pcie, false);
+
+ /* Some boards don't have PCIe reset GPIO. */
+ if (imx_pcie->reset_gpiod) {
msleep(100);
- gpio_set_value_cansleep(imx6_pcie->reset_gpio,
- !imx6_pcie->gpio_active_high);
+ gpiod_set_value_cansleep(imx_pcie->reset_gpiod, 0);
/* Wait for 100ms after PERST# deassertion (PCIe r5.0, 6.6.1) */
msleep(100);
}
@@ -767,9 +886,9 @@ static int imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
return 0;
}
-static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie)
+static int imx_pcie_wait_for_speed_change(struct imx_pcie *imx_pcie)
{
- struct dw_pcie *pci = imx6_pcie->pci;
+ struct dw_pcie *pci = imx_pcie->pci;
struct device *dev = pci->dev;
u32 tmp;
unsigned int retries;
@@ -786,55 +905,49 @@ static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie)
return -ETIMEDOUT;
}
-static void imx6_pcie_ltssm_enable(struct device *dev)
+static void imx_pcie_ltssm_enable(struct device *dev)
{
- struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
-
- switch (imx6_pcie->drvdata->variant) {
- case IMX6Q:
- case IMX6SX:
- case IMX6QP:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6Q_GPR12_PCIE_CTL_2,
- IMX6Q_GPR12_PCIE_CTL_2);
- break;
- case IMX7D:
- case IMX8MQ:
- case IMX8MM:
- case IMX8MP:
- reset_control_deassert(imx6_pcie->apps_reset);
- break;
- }
+ struct imx_pcie *imx_pcie = dev_get_drvdata(dev);
+ const struct imx_pcie_drvdata *drvdata = imx_pcie->drvdata;
+ u8 offset = dw_pcie_find_capability(imx_pcie->pci, PCI_CAP_ID_EXP);
+ u32 tmp;
+
+ tmp = dw_pcie_readl_dbi(imx_pcie->pci, offset + PCI_EXP_LNKCAP);
+ phy_set_speed(imx_pcie->phy, FIELD_GET(PCI_EXP_LNKCAP_SLS, tmp));
+ if (drvdata->ltssm_mask)
+ regmap_update_bits(imx_pcie->iomuxc_gpr, drvdata->ltssm_off, drvdata->ltssm_mask,
+ drvdata->ltssm_mask);
+
+ reset_control_deassert(imx_pcie->apps_reset);
}
-static void imx6_pcie_ltssm_disable(struct device *dev)
+static void imx_pcie_ltssm_disable(struct device *dev)
{
- struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
-
- switch (imx6_pcie->drvdata->variant) {
- case IMX6Q:
- case IMX6SX:
- case IMX6QP:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6Q_GPR12_PCIE_CTL_2, 0);
- break;
- case IMX7D:
- case IMX8MQ:
- case IMX8MM:
- case IMX8MP:
- reset_control_assert(imx6_pcie->apps_reset);
- break;
- }
+ struct imx_pcie *imx_pcie = dev_get_drvdata(dev);
+ const struct imx_pcie_drvdata *drvdata = imx_pcie->drvdata;
+
+ phy_set_speed(imx_pcie->phy, 0);
+ if (drvdata->ltssm_mask)
+ regmap_update_bits(imx_pcie->iomuxc_gpr, drvdata->ltssm_off,
+ drvdata->ltssm_mask, 0);
+
+ reset_control_assert(imx_pcie->apps_reset);
}
-static int imx6_pcie_start_link(struct dw_pcie *pci)
+static int imx_pcie_start_link(struct dw_pcie *pci)
{
- struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
+ struct imx_pcie *imx_pcie = to_imx_pcie(pci);
struct device *dev = pci->dev;
u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
u32 tmp;
int ret;
+ if (!(imx_pcie->drvdata->flags &
+ IMX_PCIE_FLAG_SPEED_CHANGE_WORKAROUND)) {
+ imx_pcie_ltssm_enable(dev);
+ return 0;
+ }
+
/*
* Force Gen1 operation when starting the link. In case the link is
* started in Gen2 mode, there is a possibility the devices on the
@@ -848,18 +961,18 @@ static int imx6_pcie_start_link(struct dw_pcie *pci)
dw_pcie_dbi_ro_wr_dis(pci);
/* Start LTSSM. */
- imx6_pcie_ltssm_enable(dev);
+ imx_pcie_ltssm_enable(dev);
- ret = dw_pcie_wait_for_link(pci);
- if (ret)
- goto err_reset_phy;
+ if (pci->max_link_speed > 1) {
+ ret = dw_pcie_wait_for_link(pci);
+ if (ret)
+ goto err_reset_phy;
- if (pci->link_gen > 1) {
/* Allow faster modes after the link is up */
dw_pcie_dbi_ro_wr_en(pci);
tmp = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
tmp &= ~PCI_EXP_LNKCAP_SLS;
- tmp |= pci->link_gen;
+ tmp |= pci->max_link_speed;
dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, tmp);
/*
@@ -871,63 +984,226 @@ static int imx6_pcie_start_link(struct dw_pcie *pci)
dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp);
dw_pcie_dbi_ro_wr_dis(pci);
- if (imx6_pcie->drvdata->flags &
- IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE) {
- /*
- * On i.MX7, DIRECT_SPEED_CHANGE behaves differently
- * from i.MX6 family when no link speed transition
- * occurs and we go Gen1 -> yep, Gen1. The difference
- * is that, in such case, it will not be cleared by HW
- * which will cause the following code to report false
- * failure.
- */
-
- ret = imx6_pcie_wait_for_speed_change(imx6_pcie);
- if (ret) {
- dev_err(dev, "Failed to bring link up!\n");
- goto err_reset_phy;
- }
- }
-
- /* Make sure link training is finished as well! */
- ret = dw_pcie_wait_for_link(pci);
- if (ret)
+ ret = imx_pcie_wait_for_speed_change(imx_pcie);
+ if (ret) {
+ dev_err(dev, "Failed to bring link up!\n");
goto err_reset_phy;
+ }
} else {
dev_info(dev, "Link: Only Gen1 is enabled\n");
}
- imx6_pcie->link_is_up = true;
- tmp = dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKSTA);
- dev_info(dev, "Link up, Gen%i\n", tmp & PCI_EXP_LNKSTA_CLS);
return 0;
err_reset_phy:
- imx6_pcie->link_is_up = false;
dev_dbg(dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n",
dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0),
dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG1));
- imx6_pcie_reset_phy(imx6_pcie);
+ imx_pcie_reset_phy(imx_pcie);
return 0;
}
-static void imx6_pcie_stop_link(struct dw_pcie *pci)
+static void imx_pcie_stop_link(struct dw_pcie *pci)
{
struct device *dev = pci->dev;
/* Turn off PCIe LTSSM */
- imx6_pcie_ltssm_disable(dev);
+ imx_pcie_ltssm_disable(dev);
}
-static int imx6_pcie_host_init(struct dw_pcie_rp *pp)
+static int imx_pcie_add_lut(struct imx_pcie *imx_pcie, u16 rid, u8 sid)
+{
+ struct dw_pcie *pci = imx_pcie->pci;
+ struct device *dev = pci->dev;
+ u32 data1, data2;
+ int free = -1;
+ int i;
+
+ if (sid >= 64) {
+ dev_err(dev, "Invalid SID for index %d\n", sid);
+ return -EINVAL;
+ }
+
+ guard(mutex)(&imx_pcie->lock);
+
+ /*
+ * Iterate through all LUT entries to check for duplicate RID and
+ * identify the first available entry. Configure this available entry
+ * immediately after verification to avoid rescanning it.
+ */
+ for (i = 0; i < IMX95_MAX_LUT; i++) {
+ regmap_write(imx_pcie->iomuxc_gpr,
+ IMX95_PE0_LUT_ACSCTRL, IMX95_PEO_LUT_RWA | i);
+ regmap_read(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA1, &data1);
+
+ if (!(data1 & IMX95_PE0_LUT_VLD)) {
+ if (free < 0)
+ free = i;
+ continue;
+ }
+
+ regmap_read(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA2, &data2);
+
+ /* Do not add duplicate RID */
+ if (rid == FIELD_GET(IMX95_PE0_LUT_REQID, data2)) {
+ dev_warn(dev, "Existing LUT entry available for RID (%d)", rid);
+ return 0;
+ }
+ }
+
+ if (free < 0) {
+ dev_err(dev, "LUT entry is not available\n");
+ return -ENOSPC;
+ }
+
+ data1 = FIELD_PREP(IMX95_PE0_LUT_DAC_ID, 0);
+ data1 |= FIELD_PREP(IMX95_PE0_LUT_STREAM_ID, sid);
+ data1 |= IMX95_PE0_LUT_VLD;
+ regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA1, data1);
+
+ if (imx_pcie->drvdata->mode == DW_PCIE_EP_TYPE)
+		data2 = 0x7; /* In EP mode, only the 'Device ID' is required */
+ else
+ data2 = IMX95_PE0_LUT_MASK; /* Match all bits of RID */
+ data2 |= FIELD_PREP(IMX95_PE0_LUT_REQID, rid);
+ regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA2, data2);
+
+ regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_ACSCTRL, free);
+
+ return 0;
+}
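/*
 * Illustrative sketch (not part of the patch): guard(mutex)(&lock) from
 * <linux/cleanup.h> acquires the mutex and releases it automatically when
 * the enclosing scope exits, so every early return in the LUT helpers
 * above drops imx_pcie->lock without an explicit mutex_unlock().
 * A minimal, hypothetical example:
 */
static int example_locked_update(struct mutex *lock, int *count, bool ready)
{
	guard(mutex)(lock);	/* unlocked on any return below */

	if (!ready)
		return -EBUSY;	/* lock is dropped here too */

	(*count)++;
	return 0;
}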
+
+static void imx_pcie_remove_lut(struct imx_pcie *imx_pcie, u16 rid)
+{
+ u32 data2;
+ int i;
+
+ guard(mutex)(&imx_pcie->lock);
+
+ for (i = 0; i < IMX95_MAX_LUT; i++) {
+ regmap_write(imx_pcie->iomuxc_gpr,
+ IMX95_PE0_LUT_ACSCTRL, IMX95_PEO_LUT_RWA | i);
+ regmap_read(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA2, &data2);
+ if (FIELD_GET(IMX95_PE0_LUT_REQID, data2) == rid) {
+ regmap_write(imx_pcie->iomuxc_gpr,
+ IMX95_PE0_LUT_DATA1, 0);
+ regmap_write(imx_pcie->iomuxc_gpr,
+ IMX95_PE0_LUT_DATA2, 0);
+ regmap_write(imx_pcie->iomuxc_gpr,
+ IMX95_PE0_LUT_ACSCTRL, i);
+
+ break;
+ }
+ }
+}
+
+static int imx_pcie_add_lut_by_rid(struct imx_pcie *imx_pcie, u32 rid)
+{
+ struct device *dev = imx_pcie->pci->dev;
+ struct device_node *target;
+ u32 sid_i, sid_m;
+ int err_i, err_m;
+ u32 sid = 0;
+
+ target = NULL;
+ err_i = of_map_id(dev->of_node, rid, "iommu-map", "iommu-map-mask",
+ &target, &sid_i);
+ if (target) {
+ of_node_put(target);
+ } else {
+ /*
+ * "target == NULL && err_i == 0" means RID out of map range.
+ * Use 1:1 map RID to streamID. Hardware can't support this
+ * because the streamID is only 6 bits
+ */
+ err_i = -EINVAL;
+ }
+
+ target = NULL;
+ err_m = of_map_id(dev->of_node, rid, "msi-map", "msi-map-mask",
+ &target, &sid_m);
+
+ /*
+ * err_m target
+	 * 0 NULL RID out of range; needs a 1:1 RID-to-
+	 * streamID map, which current hardware
+	 * can't support, so return -EINVAL.
+ * != 0 NULL msi-map does not exist, use built-in MSI
+ * 0 != NULL Get correct streamID from RID
+ * != 0 != NULL Invalid combination
+ */
+ if (!err_m && !target)
+ return -EINVAL;
+ else if (target)
+		of_node_put(target); /* Found a streamID map entry for the RID in msi-map */
+
+ /*
+ * msi-map iommu-map
+ * N N DWC MSI Ctrl
+ * Y Y ITS + SMMU, require the same SID
+ * Y N ITS
+ * N Y DWC MSI Ctrl + SMMU
+ */
+ if (err_i && err_m)
+ return 0;
+
+ if (!err_i && !err_m) {
+ /*
+ * Glue Layer
+ * <==========>
+ * ┌─────┐ ┌──────────┐
+ * │ LUT │ 6-bit streamID │ │
+ * │ │─────────────────►│ MSI │
+ * └─────┘ 2-bit ctrl ID │ │
+ * ┌───────────►│ │
+ * (i.MX95) │ │ │
+ * 00 PCIe0 │ │ │
+ * 01 ENETC │ │ │
+ * 10 PCIe1 │ │ │
+ * │ └──────────┘
+ * The MSI glue layer auto adds 2 bits controller ID ahead of
+ * streamID, so mask these 2 bits to get streamID. The
+ * IOMMU glue layer doesn't do that.
+ */
+ if (sid_i != (sid_m & IMX95_SID_MASK)) {
+ dev_err(dev, "iommu-map and msi-map entries mismatch!\n");
+ return -EINVAL;
+ }
+ }
+
+ if (!err_i)
+ sid = sid_i;
+ else if (!err_m)
+ sid = sid_m & IMX95_SID_MASK;
+
+ return imx_pcie_add_lut(imx_pcie, rid, sid);
+}
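/*
 * Illustrative sketch (not part of the patch): of_map_id() translates an
 * input ID (here a PCI requester ID) through a "<name>-map" property of
 * the form <id-base &target out-base length>. A hypothetical stand-alone
 * lookup:
 */
static int example_rid_to_sid(struct device *dev, u32 rid, u32 *sid)
{
	struct device_node *target = NULL;
	int err;

	err = of_map_id(dev->of_node, rid, "msi-map", "msi-map-mask",
			&target, sid);
	if (!err && target)
		of_node_put(target);	/* *sid now holds the mapped ID */

	return err;
}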
+
+static int imx_pcie_enable_device(struct pci_host_bridge *bridge, struct pci_dev *pdev)
+{
+ struct imx_pcie *imx_pcie = to_imx_pcie(to_dw_pcie_from_pp(bridge->sysdata));
+
+ return imx_pcie_add_lut_by_rid(imx_pcie, pci_dev_id(pdev));
+}
+
+static void imx_pcie_disable_device(struct pci_host_bridge *bridge,
+ struct pci_dev *pdev)
+{
+ struct imx_pcie *imx_pcie;
+
+ imx_pcie = to_imx_pcie(to_dw_pcie_from_pp(bridge->sysdata));
+ imx_pcie_remove_lut(imx_pcie, pci_dev_id(pdev));
+}
+
+static int imx_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct device *dev = pci->dev;
- struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
+ struct imx_pcie *imx_pcie = to_imx_pcie(pci);
int ret;
- if (imx6_pcie->vpcie) {
- ret = regulator_enable(imx6_pcie->vpcie);
+ if (imx_pcie->vpcie) {
+ ret = regulator_enable(imx_pcie->vpcie);
if (ret) {
dev_err(dev, "failed to enable vpcie regulator: %d\n",
ret);
@@ -935,166 +1211,402 @@ static int imx6_pcie_host_init(struct dw_pcie_rp *pp)
}
}
- imx6_pcie_assert_core_reset(imx6_pcie);
- imx6_pcie_init_phy(imx6_pcie);
+ if (pp->bridge && imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_LUT)) {
+ pp->bridge->enable_device = imx_pcie_enable_device;
+ pp->bridge->disable_device = imx_pcie_disable_device;
+ }
+
+ imx_pcie_assert_core_reset(imx_pcie);
+
+ if (imx_pcie->drvdata->init_phy)
+ imx_pcie->drvdata->init_phy(imx_pcie);
- ret = imx6_pcie_clk_enable(imx6_pcie);
+ imx_pcie_configure_type(imx_pcie);
+
+ ret = imx_pcie_clk_enable(imx_pcie);
if (ret) {
dev_err(dev, "unable to enable pcie clocks: %d\n", ret);
goto err_reg_disable;
}
- if (imx6_pcie->phy) {
- ret = phy_init(imx6_pcie->phy);
+ if (imx_pcie->phy) {
+ ret = phy_init(imx_pcie->phy);
if (ret) {
dev_err(dev, "pcie PHY power up failed\n");
goto err_clk_disable;
}
- }
- if (imx6_pcie->phy) {
- ret = phy_power_on(imx6_pcie->phy);
+ ret = phy_set_mode_ext(imx_pcie->phy, PHY_MODE_PCIE,
+ imx_pcie->drvdata->mode == DW_PCIE_EP_TYPE ?
+ PHY_MODE_PCIE_EP : PHY_MODE_PCIE_RC);
+ if (ret) {
+ dev_err(dev, "unable to set PCIe PHY mode\n");
+ goto err_phy_exit;
+ }
+
+ ret = phy_power_on(imx_pcie->phy);
if (ret) {
dev_err(dev, "waiting for PHY ready timeout!\n");
- goto err_phy_off;
+ goto err_phy_exit;
}
}
- ret = imx6_pcie_deassert_core_reset(imx6_pcie);
+ /* Make sure that PCIe LTSSM is cleared */
+ imx_pcie_ltssm_disable(dev);
+
+ ret = imx_pcie_deassert_core_reset(imx_pcie);
if (ret < 0) {
dev_err(dev, "pcie deassert core reset failed: %d\n", ret);
goto err_phy_off;
}
- imx6_setup_phy_mpll(imx6_pcie);
+ if (imx_pcie->drvdata->wait_pll_lock) {
+ ret = imx_pcie->drvdata->wait_pll_lock(imx_pcie);
+ if (ret < 0)
+ goto err_phy_off;
+ }
+
+ imx_setup_phy_mpll(imx_pcie);
return 0;
err_phy_off:
- if (imx6_pcie->phy)
- phy_exit(imx6_pcie->phy);
+ phy_power_off(imx_pcie->phy);
+err_phy_exit:
+ phy_exit(imx_pcie->phy);
err_clk_disable:
- imx6_pcie_clk_disable(imx6_pcie);
+ imx_pcie_clk_disable(imx_pcie);
err_reg_disable:
- if (imx6_pcie->vpcie)
- regulator_disable(imx6_pcie->vpcie);
+ if (imx_pcie->vpcie)
+ regulator_disable(imx_pcie->vpcie);
return ret;
}
-static void imx6_pcie_host_exit(struct dw_pcie_rp *pp)
+static void imx_pcie_host_exit(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
+ struct imx_pcie *imx_pcie = to_imx_pcie(pci);
- if (imx6_pcie->phy) {
- if (phy_power_off(imx6_pcie->phy))
+ if (imx_pcie->phy) {
+ if (phy_power_off(imx_pcie->phy))
dev_err(pci->dev, "unable to power off PHY\n");
- phy_exit(imx6_pcie->phy);
+ phy_exit(imx_pcie->phy);
}
- imx6_pcie_clk_disable(imx6_pcie);
+ imx_pcie_clk_disable(imx_pcie);
+
+ if (imx_pcie->vpcie)
+ regulator_disable(imx_pcie->vpcie);
+}
+
+static void imx_pcie_host_post_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct imx_pcie *imx_pcie = to_imx_pcie(pci);
+ u32 val;
+
+ if (imx_pcie->drvdata->flags & IMX_PCIE_FLAG_8GT_ECN_ERR051586) {
+ /*
+ * ERR051586: Compliance with 8GT/s Receiver Impedance ECN
+ *
+ * The default value of GEN3_RELATED_OFF[GEN3_ZRXDC_NONCOMPL]
+ * is 1 which makes receiver non-compliant with the ZRX-DC
+ * parameter for 2.5 GT/s when operating at 8 GT/s or higher.
+ * It causes unnecessary timeout in L1.
+ *
+ * Workaround: Program GEN3_RELATED_OFF[GEN3_ZRXDC_NONCOMPL]
+ * to 0.
+ */
+ dw_pcie_dbi_ro_wr_en(pci);
+ val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
+ val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
+ dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
+ dw_pcie_dbi_ro_wr_dis(pci);
+ }
+}
+
+/*
+ * In old DWC implementations, PCIE_ATU_INHIBIT_PAYLOAD in the iATU Ctrl2
+ * register is reserved, so the generic DWC implementation of sending the
+ * PME_Turn_Off message using a dummy MMIO write cannot be used.
+ */
+static void imx_pcie_pme_turn_off(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct imx_pcie *imx_pcie = to_imx_pcie(pci);
+
+ regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX6SX_GPR12_PCIE_PM_TURN_OFF);
+ regmap_clear_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX6SX_GPR12_PCIE_PM_TURN_OFF);
- if (imx6_pcie->vpcie)
- regulator_disable(imx6_pcie->vpcie);
+	usleep_range(PCIE_PME_TO_L2_TIMEOUT_US / 10, PCIE_PME_TO_L2_TIMEOUT_US);
}
-static const struct dw_pcie_host_ops imx6_pcie_host_ops = {
- .host_init = imx6_pcie_host_init,
+static const struct dw_pcie_host_ops imx_pcie_host_ops = {
+ .init = imx_pcie_host_init,
+ .deinit = imx_pcie_host_exit,
+ .pme_turn_off = imx_pcie_pme_turn_off,
+};
+
+static const struct dw_pcie_host_ops imx_pcie_host_dw_pme_ops = {
+ .init = imx_pcie_host_init,
+ .deinit = imx_pcie_host_exit,
+ .post_init = imx_pcie_host_post_init,
};
static const struct dw_pcie_ops dw_pcie_ops = {
- .start_link = imx6_pcie_start_link,
+ .start_link = imx_pcie_start_link,
+ .stop_link = imx_pcie_stop_link,
};
-static void imx6_pcie_pm_turnoff(struct imx6_pcie *imx6_pcie)
+static void imx_pcie_ep_init(struct dw_pcie_ep *ep)
{
- struct device *dev = imx6_pcie->pci->dev;
+ enum pci_barno bar;
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- /* Some variants have a turnoff reset in DT */
- if (imx6_pcie->turnoff_reset) {
- reset_control_assert(imx6_pcie->turnoff_reset);
- reset_control_deassert(imx6_pcie->turnoff_reset);
- goto pm_turnoff_sleep;
- }
+ for (bar = BAR_0; bar <= BAR_5; bar++)
+ dw_pcie_ep_reset_bar(pci, bar);
+}
- /* Others poke directly at IOMUXC registers */
- switch (imx6_pcie->drvdata->variant) {
- case IMX6SX:
- case IMX6QP:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6SX_GPR12_PCIE_PM_TURN_OFF,
- IMX6SX_GPR12_PCIE_PM_TURN_OFF);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6SX_GPR12_PCIE_PM_TURN_OFF, 0);
- break;
+static int imx_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+ unsigned int type, u16 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ switch (type) {
+ case PCI_IRQ_INTX:
+ return dw_pcie_ep_raise_intx_irq(ep, func_no);
+ case PCI_IRQ_MSI:
+ return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
+ case PCI_IRQ_MSIX:
+ return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
default:
- dev_err(dev, "PME_Turn_Off not implemented\n");
- return;
+ dev_err(pci->dev, "UNKNOWN IRQ type\n");
+ return -EINVAL;
}
- /*
- * Components with an upstream port must respond to
- * PME_Turn_Off with PME_TO_Ack but we can't check.
- *
- * The standard recommends a 1-10ms timeout after which to
- * proceed anyway as if acks were received.
- */
-pm_turnoff_sleep:
- usleep_range(1000, 10000);
+ return 0;
}
-static int imx6_pcie_suspend_noirq(struct device *dev)
+static const struct pci_epc_features imx8m_pcie_epc_features = {
+ .msi_capable = true,
+ .bar[BAR_1] = { .type = BAR_RESERVED, },
+ .bar[BAR_3] = { .type = BAR_RESERVED, },
+ .bar[BAR_4] = { .type = BAR_FIXED, .fixed_size = SZ_256, },
+ .bar[BAR_5] = { .type = BAR_RESERVED, },
+ .align = SZ_64K,
+};
+
+static const struct pci_epc_features imx8q_pcie_epc_features = {
+ .msi_capable = true,
+ .bar[BAR_1] = { .type = BAR_RESERVED, },
+ .bar[BAR_3] = { .type = BAR_RESERVED, },
+ .bar[BAR_5] = { .type = BAR_RESERVED, },
+ .align = SZ_64K,
+};
+
+/*
+ * | Default | Default | Default | BAR Sizing
+ * BAR# | Enable? | Type | Size | Scheme
+ * =======================================================
+ * BAR0 | Enable | 64-bit | 1 MB | Programmable Size
+ * BAR1 | Disable | 32-bit | 64 KB | Fixed Size
+ * (BAR1 should be disabled if BAR0 is 64-bit)
+ * BAR2 | Enable | 32-bit | 1 MB | Programmable Size
+ * BAR3 | Enable | 32-bit | 64 KB | Programmable Size
+ * BAR4 | Enable | 32-bit | 1 MB | Programmable Size
+ * BAR5 | Enable | 32-bit | 64 KB | Programmable Size
+ */
+static const struct pci_epc_features imx95_pcie_epc_features = {
+ .msi_capable = true,
+ .bar[BAR_1] = { .type = BAR_FIXED, .fixed_size = SZ_64K, },
+ .align = SZ_4K,
+};
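/*
 * Illustrative sketch (not part of the patch): an endpoint function
 * driver consumes these feature tables through the EPC core, e.g. to
 * pick a BAR while honouring BAR_RESERVED/BAR_FIXED entries.
 * A hypothetical helper:
 */
static enum pci_barno example_pick_bar(struct pci_epc *epc, u8 func_no,
				       u8 vfunc_no)
{
	const struct pci_epc_features *feat;

	feat = pci_epc_get_features(epc, func_no, vfunc_no);
	if (!feat)
		return NO_BAR;

	return pci_epc_get_first_free_bar(feat);
}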
+
+static const struct pci_epc_features*
+imx_pcie_ep_get_features(struct dw_pcie_ep *ep)
{
- struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
- struct dw_pcie_rp *pp = &imx6_pcie->pci->pp;
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct imx_pcie *imx_pcie = to_imx_pcie(pci);
+
+ return imx_pcie->drvdata->epc_features;
+}
- if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_SUPPORTS_SUSPEND))
+static const struct dw_pcie_ep_ops pcie_ep_ops = {
+ .init = imx_pcie_ep_init,
+ .raise_irq = imx_pcie_ep_raise_irq,
+ .get_features = imx_pcie_ep_get_features,
+};
+
+static int imx_add_pcie_ep(struct imx_pcie *imx_pcie,
+ struct platform_device *pdev)
+{
+ int ret;
+ struct dw_pcie_ep *ep;
+ struct dw_pcie *pci = imx_pcie->pci;
+ struct dw_pcie_rp *pp = &pci->pp;
+ struct device *dev = pci->dev;
+
+ imx_pcie_host_init(pp);
+ ep = &pci->ep;
+ ep->ops = &pcie_ep_ops;
+
+ if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_SUPPORT_64BIT))
+ dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+
+ ep->page_size = imx_pcie->drvdata->epc_features->align;
+
+ ret = dw_pcie_ep_init(ep);
+ if (ret) {
+ dev_err(dev, "failed to initialize endpoint\n");
+ return ret;
+ }
+ imx_pcie_host_post_init(pp);
+
+ ret = dw_pcie_ep_init_registers(ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DWC endpoint registers\n");
+ dw_pcie_ep_deinit(ep);
+ return ret;
+ }
+
+ pci_epc_init_notify(ep->epc);
+
+ return 0;
+}
+
+static void imx_pcie_msi_save_restore(struct imx_pcie *imx_pcie, bool save)
+{
+ u8 offset;
+ u16 val;
+ struct dw_pcie *pci = imx_pcie->pci;
+
+ if (pci_msi_enabled()) {
+ offset = dw_pcie_find_capability(pci, PCI_CAP_ID_MSI);
+ if (save) {
+ val = dw_pcie_readw_dbi(pci, offset + PCI_MSI_FLAGS);
+ imx_pcie->msi_ctrl = val;
+ } else {
+ dw_pcie_dbi_ro_wr_en(pci);
+ val = imx_pcie->msi_ctrl;
+ dw_pcie_writew_dbi(pci, offset + PCI_MSI_FLAGS, val);
+ dw_pcie_dbi_ro_wr_dis(pci);
+ }
+ }
+}
+
+static void imx_pcie_lut_save(struct imx_pcie *imx_pcie)
+{
+ u32 data1, data2;
+ int i;
+
+ for (i = 0; i < IMX95_MAX_LUT; i++) {
+ regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_ACSCTRL,
+ IMX95_PEO_LUT_RWA | i);
+ regmap_read(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA1, &data1);
+ regmap_read(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA2, &data2);
+ if (data1 & IMX95_PE0_LUT_VLD) {
+ imx_pcie->luts[i].data1 = data1;
+ imx_pcie->luts[i].data2 = data2;
+ } else {
+ imx_pcie->luts[i].data1 = 0;
+ imx_pcie->luts[i].data2 = 0;
+ }
+ }
+}
+
+static void imx_pcie_lut_restore(struct imx_pcie *imx_pcie)
+{
+ int i;
+
+ for (i = 0; i < IMX95_MAX_LUT; i++) {
+ if ((imx_pcie->luts[i].data1 & IMX95_PE0_LUT_VLD) == 0)
+ continue;
+
+ regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA1,
+ imx_pcie->luts[i].data1);
+ regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA2,
+ imx_pcie->luts[i].data2);
+ regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_ACSCTRL, i);
+ }
+}
+
+static int imx_pcie_suspend_noirq(struct device *dev)
+{
+ struct imx_pcie *imx_pcie = dev_get_drvdata(dev);
+
+ if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_SUPPORTS_SUSPEND))
return 0;
- imx6_pcie_pm_turnoff(imx6_pcie);
- imx6_pcie_stop_link(imx6_pcie->pci);
- imx6_pcie_host_exit(pp);
+ imx_pcie_msi_save_restore(imx_pcie, true);
+ if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_LUT))
+ imx_pcie_lut_save(imx_pcie);
+ if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_BROKEN_SUSPEND)) {
+ /*
+ * The minimum for a workaround would be to set PERST# and to
+ * set the PCIE_TEST_PD flag. However, we can also disable the
+ * clock which saves some power.
+ */
+ imx_pcie_assert_core_reset(imx_pcie);
+ imx_pcie->drvdata->enable_ref_clk(imx_pcie, false);
+ } else {
+ return dw_pcie_suspend_noirq(imx_pcie->pci);
+ }
return 0;
}
-static int imx6_pcie_resume_noirq(struct device *dev)
+static int imx_pcie_resume_noirq(struct device *dev)
{
int ret;
- struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
- struct dw_pcie_rp *pp = &imx6_pcie->pci->pp;
+ struct imx_pcie *imx_pcie = dev_get_drvdata(dev);
- if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_SUPPORTS_SUSPEND))
+ if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_SUPPORTS_SUSPEND))
return 0;
- ret = imx6_pcie_host_init(pp);
- if (ret)
- return ret;
- dw_pcie_setup_rc(pp);
+ if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_BROKEN_SUSPEND)) {
+ ret = imx_pcie->drvdata->enable_ref_clk(imx_pcie, true);
+ if (ret)
+ return ret;
+ ret = imx_pcie_deassert_core_reset(imx_pcie);
+ if (ret)
+ return ret;
- if (imx6_pcie->link_is_up)
- imx6_pcie_start_link(imx6_pcie->pci);
+ /*
+	 * Using PCIE_TEST_PD seems to disable MSI and power down the
+	 * root complex. This is why we have to set up the RC again and
+	 * restore the MSI register.
+ */
+ ret = dw_pcie_setup_rc(&imx_pcie->pci->pp);
+ if (ret)
+ return ret;
+ } else {
+ ret = dw_pcie_resume_noirq(imx_pcie->pci);
+ if (ret)
+ return ret;
+ }
+ if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_LUT))
+ imx_pcie_lut_restore(imx_pcie);
+ imx_pcie_msi_save_restore(imx_pcie, false);
return 0;
}
-static const struct dev_pm_ops imx6_pcie_pm_ops = {
- NOIRQ_SYSTEM_SLEEP_PM_OPS(imx6_pcie_suspend_noirq,
- imx6_pcie_resume_noirq)
+static const struct dev_pm_ops imx_pcie_pm_ops = {
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(imx_pcie_suspend_noirq,
+ imx_pcie_resume_noirq)
};
-static int imx6_pcie_probe(struct platform_device *pdev)
+static int imx_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct dw_pcie *pci;
- struct imx6_pcie *imx6_pcie;
+ struct imx_pcie *imx_pcie;
struct device_node *np;
- struct resource *dbi_base;
struct device_node *node = dev->of_node;
- int ret;
+ int ret, domain;
u16 val;
- imx6_pcie = devm_kzalloc(dev, sizeof(*imx6_pcie), GFP_KERNEL);
- if (!imx6_pcie)
+ imx_pcie = devm_kzalloc(dev, sizeof(*imx_pcie), GFP_KERNEL);
+ if (!imx_pcie)
return -ENOMEM;
pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
@@ -1103,10 +1615,16 @@ static int imx6_pcie_probe(struct platform_device *pdev)
pci->dev = dev;
pci->ops = &dw_pcie_ops;
- pci->pp.ops = &imx6_pcie_host_ops;
- imx6_pcie->pci = pci;
- imx6_pcie->drvdata = of_device_get_match_data(dev);
+ imx_pcie->pci = pci;
+ imx_pcie->drvdata = of_device_get_match_data(dev);
+
+ mutex_init(&imx_pcie->lock);
+
+ if (imx_pcie->drvdata->ops)
+ pci->pp.ops = imx_pcie->drvdata->ops;
+ else
+ pci->pp.ops = &imx_pcie_host_dw_pme_ops;
/* Find the PHY if one is defined, only imx7d uses it */
np = of_parse_phandle(node, "fsl,imx7d-pcie-phy", 0);
@@ -1118,234 +1636,344 @@ static int imx6_pcie_probe(struct platform_device *pdev)
dev_err(dev, "Unable to map PCIe PHY\n");
return ret;
}
- imx6_pcie->phy_base = devm_ioremap_resource(dev, &res);
- if (IS_ERR(imx6_pcie->phy_base))
- return PTR_ERR(imx6_pcie->phy_base);
+ imx_pcie->phy_base = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(imx_pcie->phy_base))
+ return PTR_ERR(imx_pcie->phy_base);
}
- dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pci->dbi_base = devm_ioremap_resource(dev, dbi_base);
- if (IS_ERR(pci->dbi_base))
- return PTR_ERR(pci->dbi_base);
-
/* Fetch GPIOs */
- imx6_pcie->reset_gpio = of_get_named_gpio(node, "reset-gpio", 0);
- imx6_pcie->gpio_active_high = of_property_read_bool(node,
- "reset-gpio-active-high");
- if (gpio_is_valid(imx6_pcie->reset_gpio)) {
- ret = devm_gpio_request_one(dev, imx6_pcie->reset_gpio,
- imx6_pcie->gpio_active_high ?
- GPIOF_OUT_INIT_HIGH :
- GPIOF_OUT_INIT_LOW,
- "PCIe reset");
- if (ret) {
- dev_err(dev, "unable to get reset gpio\n");
- return ret;
- }
- } else if (imx6_pcie->reset_gpio == -EPROBE_DEFER) {
- return imx6_pcie->reset_gpio;
- }
+ imx_pcie->reset_gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(imx_pcie->reset_gpiod))
+ return dev_err_probe(dev, PTR_ERR(imx_pcie->reset_gpiod),
+ "unable to get reset gpio\n");
+ gpiod_set_consumer_name(imx_pcie->reset_gpiod, "PCIe reset");
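/*
 * Illustrative note (not part of the patch): devm_gpiod_get_optional()
 * returns NULL when no reset GPIO is described in the device tree, and
 * the gpiod_* setters accept NULL, so boards without PERST# need no
 * special casing. Polarity now comes from the GPIO flags in the DT, so
 * gpiod_set_value_cansleep(gpiod, 1) always means "assert reset",
 * replacing the old "reset-gpio-active-high" handling.
 */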
/* Fetch clocks */
- imx6_pcie->pcie_bus = devm_clk_get(dev, "pcie_bus");
- if (IS_ERR(imx6_pcie->pcie_bus))
- return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_bus),
- "pcie_bus clock source missing or invalid\n");
-
- imx6_pcie->pcie = devm_clk_get(dev, "pcie");
- if (IS_ERR(imx6_pcie->pcie))
- return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie),
- "pcie clock source missing or invalid\n");
-
- switch (imx6_pcie->drvdata->variant) {
- case IMX6SX:
- imx6_pcie->pcie_inbound_axi = devm_clk_get(dev,
- "pcie_inbound_axi");
- if (IS_ERR(imx6_pcie->pcie_inbound_axi))
- return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_inbound_axi),
- "pcie_inbound_axi clock missing or invalid\n");
- break;
- case IMX8MQ:
- imx6_pcie->pcie_aux = devm_clk_get(dev, "pcie_aux");
- if (IS_ERR(imx6_pcie->pcie_aux))
- return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_aux),
- "pcie_aux clock source missing or invalid\n");
- fallthrough;
- case IMX7D:
- if (dbi_base->start == IMX8MQ_PCIE2_BASE_ADDR)
- imx6_pcie->controller_id = 1;
-
- imx6_pcie->pciephy_reset = devm_reset_control_get_exclusive(dev,
- "pciephy");
- if (IS_ERR(imx6_pcie->pciephy_reset)) {
- dev_err(dev, "Failed to get PCIEPHY reset control\n");
- return PTR_ERR(imx6_pcie->pciephy_reset);
- }
+ imx_pcie->num_clks = devm_clk_bulk_get_all(dev, &imx_pcie->clks);
+ if (imx_pcie->num_clks < 0)
+ return dev_err_probe(dev, imx_pcie->num_clks,
+ "failed to get clocks\n");
+
+ if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_PHYDRV)) {
+ imx_pcie->phy = devm_phy_get(dev, "pcie-phy");
+ if (IS_ERR(imx_pcie->phy))
+ return dev_err_probe(dev, PTR_ERR(imx_pcie->phy),
+ "failed to get pcie phy\n");
+ }
- imx6_pcie->apps_reset = devm_reset_control_get_exclusive(dev,
- "apps");
- if (IS_ERR(imx6_pcie->apps_reset)) {
- dev_err(dev, "Failed to get PCIE APPS reset control\n");
- return PTR_ERR(imx6_pcie->apps_reset);
- }
- break;
- case IMX8MM:
- case IMX8MP:
- imx6_pcie->pcie_aux = devm_clk_get(dev, "pcie_aux");
- if (IS_ERR(imx6_pcie->pcie_aux))
- return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_aux),
- "pcie_aux clock source missing or invalid\n");
- imx6_pcie->apps_reset = devm_reset_control_get_exclusive(dev,
- "apps");
- if (IS_ERR(imx6_pcie->apps_reset))
- return dev_err_probe(dev, PTR_ERR(imx6_pcie->apps_reset),
+ if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_APP_RESET)) {
+ imx_pcie->apps_reset = devm_reset_control_get_exclusive(dev, "apps");
+ if (IS_ERR(imx_pcie->apps_reset))
+ return dev_err_probe(dev, PTR_ERR(imx_pcie->apps_reset),
"failed to get pcie apps reset control\n");
+ }
- imx6_pcie->phy = devm_phy_get(dev, "pcie-phy");
- if (IS_ERR(imx6_pcie->phy))
- return dev_err_probe(dev, PTR_ERR(imx6_pcie->phy),
- "failed to get pcie phy\n");
+ if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_PHY_RESET)) {
+ imx_pcie->pciephy_reset = devm_reset_control_get_exclusive(dev, "pciephy");
+ if (IS_ERR(imx_pcie->pciephy_reset))
+ return dev_err_probe(dev, PTR_ERR(imx_pcie->pciephy_reset),
+ "Failed to get PCIEPHY reset control\n");
+ }
+
+ switch (imx_pcie->drvdata->variant) {
+ case IMX8MQ:
+ case IMX8MQ_EP:
+ domain = of_get_pci_domain_nr(node);
+ if (domain < 0 || domain > 1)
+ return dev_err_probe(dev, -ENODEV, "no \"linux,pci-domain\" property in devicetree\n");
+ imx_pcie->controller_id = domain;
break;
default:
break;
}
- /* Don't fetch the pcie_phy clock, if it has abstract PHY driver */
- if (imx6_pcie->phy == NULL) {
- imx6_pcie->pcie_phy = devm_clk_get(dev, "pcie_phy");
- if (IS_ERR(imx6_pcie->pcie_phy))
- return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_phy),
- "pcie_phy clock source missing or invalid\n");
+
+ if (imx_pcie->drvdata->gpr) {
+ /* Grab GPR config register range */
+ imx_pcie->iomuxc_gpr =
+ syscon_regmap_lookup_by_compatible(imx_pcie->drvdata->gpr);
+ if (IS_ERR(imx_pcie->iomuxc_gpr))
+ return dev_err_probe(dev, PTR_ERR(imx_pcie->iomuxc_gpr),
+ "unable to find iomuxc registers\n");
}
+ if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_SERDES)) {
+ void __iomem *off = devm_platform_ioremap_resource_byname(pdev, "app");
- /* Grab turnoff reset */
- imx6_pcie->turnoff_reset = devm_reset_control_get_optional_exclusive(dev, "turnoff");
- if (IS_ERR(imx6_pcie->turnoff_reset)) {
- dev_err(dev, "Failed to get TURNOFF reset control\n");
- return PTR_ERR(imx6_pcie->turnoff_reset);
- }
+ if (IS_ERR(off))
+ return dev_err_probe(dev, PTR_ERR(off),
+ "unable to find serdes registers\n");
- /* Grab GPR config register range */
- imx6_pcie->iomuxc_gpr =
- syscon_regmap_lookup_by_compatible(imx6_pcie->drvdata->gpr);
- if (IS_ERR(imx6_pcie->iomuxc_gpr)) {
- dev_err(dev, "unable to find iomuxc registers\n");
- return PTR_ERR(imx6_pcie->iomuxc_gpr);
+ static const struct regmap_config regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ };
+
+ imx_pcie->iomuxc_gpr = devm_regmap_init_mmio(dev, off, &regmap_config);
+ if (IS_ERR(imx_pcie->iomuxc_gpr))
+ return dev_err_probe(dev, PTR_ERR(imx_pcie->iomuxc_gpr),
+ "unable to find iomuxc registers\n");
}
/* Grab PCIe PHY Tx Settings */
if (of_property_read_u32(node, "fsl,tx-deemph-gen1",
- &imx6_pcie->tx_deemph_gen1))
- imx6_pcie->tx_deemph_gen1 = 0;
+ &imx_pcie->tx_deemph_gen1))
+ imx_pcie->tx_deemph_gen1 = 0;
if (of_property_read_u32(node, "fsl,tx-deemph-gen2-3p5db",
- &imx6_pcie->tx_deemph_gen2_3p5db))
- imx6_pcie->tx_deemph_gen2_3p5db = 0;
+ &imx_pcie->tx_deemph_gen2_3p5db))
+ imx_pcie->tx_deemph_gen2_3p5db = 0;
if (of_property_read_u32(node, "fsl,tx-deemph-gen2-6db",
- &imx6_pcie->tx_deemph_gen2_6db))
- imx6_pcie->tx_deemph_gen2_6db = 20;
+ &imx_pcie->tx_deemph_gen2_6db))
+ imx_pcie->tx_deemph_gen2_6db = 20;
if (of_property_read_u32(node, "fsl,tx-swing-full",
- &imx6_pcie->tx_swing_full))
- imx6_pcie->tx_swing_full = 127;
+ &imx_pcie->tx_swing_full))
+ imx_pcie->tx_swing_full = 127;
if (of_property_read_u32(node, "fsl,tx-swing-low",
- &imx6_pcie->tx_swing_low))
- imx6_pcie->tx_swing_low = 127;
+ &imx_pcie->tx_swing_low))
+ imx_pcie->tx_swing_low = 127;
/* Limit link speed */
- pci->link_gen = 1;
- of_property_read_u32(node, "fsl,max-link-speed", &pci->link_gen);
-
- imx6_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie");
- if (IS_ERR(imx6_pcie->vpcie)) {
- if (PTR_ERR(imx6_pcie->vpcie) != -ENODEV)
- return PTR_ERR(imx6_pcie->vpcie);
- imx6_pcie->vpcie = NULL;
+ pci->max_link_speed = 1;
+ of_property_read_u32(node, "fsl,max-link-speed", &pci->max_link_speed);
+
+ ret = devm_regulator_get_enable_optional(&pdev->dev, "vpcie3v3aux");
+ if (ret < 0 && ret != -ENODEV)
+ return dev_err_probe(dev, ret, "failed to enable Vaux supply\n");
+
+ imx_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie");
+ if (IS_ERR(imx_pcie->vpcie)) {
+ if (PTR_ERR(imx_pcie->vpcie) != -ENODEV)
+ return PTR_ERR(imx_pcie->vpcie);
+ imx_pcie->vpcie = NULL;
}
- imx6_pcie->vph = devm_regulator_get_optional(&pdev->dev, "vph");
- if (IS_ERR(imx6_pcie->vph)) {
- if (PTR_ERR(imx6_pcie->vph) != -ENODEV)
- return PTR_ERR(imx6_pcie->vph);
- imx6_pcie->vph = NULL;
+ imx_pcie->vph = devm_regulator_get_optional(&pdev->dev, "vph");
+ if (IS_ERR(imx_pcie->vph)) {
+ if (PTR_ERR(imx_pcie->vph) != -ENODEV)
+ return PTR_ERR(imx_pcie->vph);
+ imx_pcie->vph = NULL;
}
- platform_set_drvdata(pdev, imx6_pcie);
+ platform_set_drvdata(pdev, imx_pcie);
- ret = imx6_pcie_attach_pd(dev);
+ ret = imx_pcie_attach_pd(dev);
if (ret)
return ret;
- ret = dw_pcie_host_init(&pci->pp);
- if (ret < 0)
- return ret;
+ pci->use_parent_dt_ranges = true;
+ if (imx_pcie->drvdata->mode == DW_PCIE_EP_TYPE) {
+ ret = imx_add_pcie_ep(imx_pcie, pdev);
+ if (ret < 0)
+ return ret;
- if (pci_msi_enabled()) {
- u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_MSI);
- val = dw_pcie_readw_dbi(pci, offset + PCI_MSI_FLAGS);
- val |= PCI_MSI_FLAGS_ENABLE;
- dw_pcie_writew_dbi(pci, offset + PCI_MSI_FLAGS, val);
+ /*
+ * FIXME: Only single Device (EPF) is supported due to the
+ * Endpoint framework limitation.
+ */
+ imx_pcie_add_lut_by_rid(imx_pcie, 0);
+ } else {
+ pci->pp.use_atu_msg = true;
+ ret = dw_pcie_host_init(&pci->pp);
+ if (ret < 0)
+ return ret;
+
+ if (pci_msi_enabled()) {
+ u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_MSI);
+
+ val = dw_pcie_readw_dbi(pci, offset + PCI_MSI_FLAGS);
+ val |= PCI_MSI_FLAGS_ENABLE;
+ dw_pcie_writew_dbi(pci, offset + PCI_MSI_FLAGS, val);
+ }
}
return 0;
}
-static void imx6_pcie_shutdown(struct platform_device *pdev)
+static void imx_pcie_shutdown(struct platform_device *pdev)
{
- struct imx6_pcie *imx6_pcie = platform_get_drvdata(pdev);
+ struct imx_pcie *imx_pcie = platform_get_drvdata(pdev);
/* bring down link, so bootloader gets clean state in case of reboot */
- imx6_pcie_assert_core_reset(imx6_pcie);
+ imx_pcie_assert_core_reset(imx_pcie);
}
-static const struct imx6_pcie_drvdata drvdata[] = {
+static const struct imx_pcie_drvdata drvdata[] = {
[IMX6Q] = {
.variant = IMX6Q,
- .flags = IMX6_PCIE_FLAG_IMX6_PHY |
- IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE,
+ .flags = IMX_PCIE_FLAG_IMX_PHY |
+ IMX_PCIE_FLAG_SPEED_CHANGE_WORKAROUND |
+ IMX_PCIE_FLAG_BROKEN_SUSPEND |
+ IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
.dbi_length = 0x200,
.gpr = "fsl,imx6q-iomuxc-gpr",
+ .ltssm_off = IOMUXC_GPR12,
+ .ltssm_mask = IMX6Q_GPR12_PCIE_CTL_2,
+ .mode_off[0] = IOMUXC_GPR12,
+ .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
+ .init_phy = imx_pcie_init_phy,
+ .enable_ref_clk = imx6q_pcie_enable_ref_clk,
+ .core_reset = imx6q_pcie_core_reset,
},
[IMX6SX] = {
.variant = IMX6SX,
- .flags = IMX6_PCIE_FLAG_IMX6_PHY |
- IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE |
- IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
+ .flags = IMX_PCIE_FLAG_IMX_PHY |
+ IMX_PCIE_FLAG_SPEED_CHANGE_WORKAROUND |
+ IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
.gpr = "fsl,imx6q-iomuxc-gpr",
+ .ltssm_off = IOMUXC_GPR12,
+ .ltssm_mask = IMX6Q_GPR12_PCIE_CTL_2,
+ .mode_off[0] = IOMUXC_GPR12,
+ .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
+ .init_phy = imx6sx_pcie_init_phy,
+ .enable_ref_clk = imx6sx_pcie_enable_ref_clk,
+ .core_reset = imx6sx_pcie_core_reset,
+ .ops = &imx_pcie_host_ops,
},
[IMX6QP] = {
.variant = IMX6QP,
- .flags = IMX6_PCIE_FLAG_IMX6_PHY |
- IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE |
- IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
+ .flags = IMX_PCIE_FLAG_IMX_PHY |
+ IMX_PCIE_FLAG_SPEED_CHANGE_WORKAROUND |
+ IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
.dbi_length = 0x200,
.gpr = "fsl,imx6q-iomuxc-gpr",
+ .ltssm_off = IOMUXC_GPR12,
+ .ltssm_mask = IMX6Q_GPR12_PCIE_CTL_2,
+ .mode_off[0] = IOMUXC_GPR12,
+ .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
+ .init_phy = imx_pcie_init_phy,
+ .enable_ref_clk = imx6q_pcie_enable_ref_clk,
+ .core_reset = imx6qp_pcie_core_reset,
+ .ops = &imx_pcie_host_ops,
},
[IMX7D] = {
.variant = IMX7D,
- .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
+ .flags = IMX_PCIE_FLAG_SUPPORTS_SUSPEND |
+ IMX_PCIE_FLAG_HAS_APP_RESET |
+ IMX_PCIE_FLAG_HAS_PHY_RESET,
.gpr = "fsl,imx7d-iomuxc-gpr",
+ .mode_off[0] = IOMUXC_GPR12,
+ .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
+ .enable_ref_clk = imx7d_pcie_enable_ref_clk,
+ .core_reset = imx7d_pcie_core_reset,
},
[IMX8MQ] = {
.variant = IMX8MQ,
+ .flags = IMX_PCIE_FLAG_HAS_APP_RESET |
+ IMX_PCIE_FLAG_HAS_PHY_RESET |
+ IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
.gpr = "fsl,imx8mq-iomuxc-gpr",
+ .mode_off[0] = IOMUXC_GPR12,
+ .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
+ .mode_off[1] = IOMUXC_GPR12,
+ .mode_mask[1] = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE,
+ .init_phy = imx8mq_pcie_init_phy,
+ .enable_ref_clk = imx8mm_pcie_enable_ref_clk,
},
[IMX8MM] = {
.variant = IMX8MM,
- .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
+ .flags = IMX_PCIE_FLAG_SUPPORTS_SUSPEND |
+ IMX_PCIE_FLAG_HAS_PHYDRV |
+ IMX_PCIE_FLAG_HAS_APP_RESET,
.gpr = "fsl,imx8mm-iomuxc-gpr",
+ .mode_off[0] = IOMUXC_GPR12,
+ .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
+ .enable_ref_clk = imx8mm_pcie_enable_ref_clk,
},
[IMX8MP] = {
.variant = IMX8MP,
- .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
+ .flags = IMX_PCIE_FLAG_SUPPORTS_SUSPEND |
+ IMX_PCIE_FLAG_HAS_PHYDRV |
+ IMX_PCIE_FLAG_HAS_APP_RESET,
.gpr = "fsl,imx8mp-iomuxc-gpr",
+ .mode_off[0] = IOMUXC_GPR12,
+ .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
+ .enable_ref_clk = imx8mm_pcie_enable_ref_clk,
+ },
+ [IMX8Q] = {
+ .variant = IMX8Q,
+ .flags = IMX_PCIE_FLAG_HAS_PHYDRV |
+ IMX_PCIE_FLAG_CPU_ADDR_FIXUP |
+ IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
+ },
+ [IMX95] = {
+ .variant = IMX95,
+ .flags = IMX_PCIE_FLAG_HAS_SERDES |
+ IMX_PCIE_FLAG_HAS_LUT |
+ IMX_PCIE_FLAG_8GT_ECN_ERR051586 |
+ IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
+ .ltssm_off = IMX95_PE0_GEN_CTRL_3,
+ .ltssm_mask = IMX95_PCIE_LTSSM_EN,
+ .mode_off[0] = IMX95_PE0_GEN_CTRL_1,
+ .mode_mask[0] = IMX95_PCIE_DEVICE_TYPE,
+ .core_reset = imx95_pcie_core_reset,
+ .init_phy = imx95_pcie_init_phy,
+ .wait_pll_lock = imx95_pcie_wait_for_phy_pll_lock,
+ },
+ [IMX8MQ_EP] = {
+ .variant = IMX8MQ_EP,
+ .flags = IMX_PCIE_FLAG_HAS_APP_RESET |
+ IMX_PCIE_FLAG_HAS_PHY_RESET,
+ .mode = DW_PCIE_EP_TYPE,
+ .gpr = "fsl,imx8mq-iomuxc-gpr",
+ .mode_off[0] = IOMUXC_GPR12,
+ .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
+ .mode_off[1] = IOMUXC_GPR12,
+ .mode_mask[1] = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE,
+ .epc_features = &imx8q_pcie_epc_features,
+ .init_phy = imx8mq_pcie_init_phy,
+ .enable_ref_clk = imx8mm_pcie_enable_ref_clk,
+ },
+ [IMX8MM_EP] = {
+ .variant = IMX8MM_EP,
+ .flags = IMX_PCIE_FLAG_HAS_APP_RESET |
+ IMX_PCIE_FLAG_HAS_PHYDRV,
+ .mode = DW_PCIE_EP_TYPE,
+ .gpr = "fsl,imx8mm-iomuxc-gpr",
+ .mode_off[0] = IOMUXC_GPR12,
+ .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
+ .epc_features = &imx8m_pcie_epc_features,
+ .enable_ref_clk = imx8mm_pcie_enable_ref_clk,
+ },
+ [IMX8MP_EP] = {
+ .variant = IMX8MP_EP,
+ .flags = IMX_PCIE_FLAG_HAS_APP_RESET |
+ IMX_PCIE_FLAG_HAS_PHYDRV,
+ .mode = DW_PCIE_EP_TYPE,
+ .gpr = "fsl,imx8mp-iomuxc-gpr",
+ .mode_off[0] = IOMUXC_GPR12,
+ .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
+ .epc_features = &imx8m_pcie_epc_features,
+ .enable_ref_clk = imx8mm_pcie_enable_ref_clk,
+ },
+ [IMX8Q_EP] = {
+ .variant = IMX8Q_EP,
+ .flags = IMX_PCIE_FLAG_HAS_PHYDRV,
+ .mode = DW_PCIE_EP_TYPE,
+ .epc_features = &imx8q_pcie_epc_features,
+ },
+ [IMX95_EP] = {
+ .variant = IMX95_EP,
+ .flags = IMX_PCIE_FLAG_HAS_SERDES |
+ IMX_PCIE_FLAG_8GT_ECN_ERR051586 |
+ IMX_PCIE_FLAG_SUPPORT_64BIT,
+ .ltssm_off = IMX95_PE0_GEN_CTRL_3,
+ .ltssm_mask = IMX95_PCIE_LTSSM_EN,
+ .mode_off[0] = IMX95_PE0_GEN_CTRL_1,
+ .mode_mask[0] = IMX95_PCIE_DEVICE_TYPE,
+ .init_phy = imx95_pcie_init_phy,
+ .core_reset = imx95_pcie_core_reset,
+ .wait_pll_lock = imx95_pcie_wait_for_phy_pll_lock,
+ .epc_features = &imx95_pcie_epc_features,
+ .mode = DW_PCIE_EP_TYPE,
},
};
-static const struct of_device_id imx6_pcie_of_match[] = {
+static const struct of_device_id imx_pcie_of_match[] = {
{ .compatible = "fsl,imx6q-pcie", .data = &drvdata[IMX6Q], },
{ .compatible = "fsl,imx6sx-pcie", .data = &drvdata[IMX6SX], },
{ .compatible = "fsl,imx6qp-pcie", .data = &drvdata[IMX6QP], },
@@ -1353,22 +1981,29 @@ static const struct of_device_id imx6_pcie_of_match[] = {
{ .compatible = "fsl,imx8mq-pcie", .data = &drvdata[IMX8MQ], },
{ .compatible = "fsl,imx8mm-pcie", .data = &drvdata[IMX8MM], },
{ .compatible = "fsl,imx8mp-pcie", .data = &drvdata[IMX8MP], },
+ { .compatible = "fsl,imx8q-pcie", .data = &drvdata[IMX8Q], },
+ { .compatible = "fsl,imx95-pcie", .data = &drvdata[IMX95], },
+ { .compatible = "fsl,imx8mq-pcie-ep", .data = &drvdata[IMX8MQ_EP], },
+ { .compatible = "fsl,imx8mm-pcie-ep", .data = &drvdata[IMX8MM_EP], },
+ { .compatible = "fsl,imx8mp-pcie-ep", .data = &drvdata[IMX8MP_EP], },
+ { .compatible = "fsl,imx8q-pcie-ep", .data = &drvdata[IMX8Q_EP], },
+ { .compatible = "fsl,imx95-pcie-ep", .data = &drvdata[IMX95_EP], },
{},
};
-static struct platform_driver imx6_pcie_driver = {
+static struct platform_driver imx_pcie_driver = {
.driver = {
.name = "imx6q-pcie",
- .of_match_table = imx6_pcie_of_match,
+ .of_match_table = imx_pcie_of_match,
.suppress_bind_attrs = true,
- .pm = &imx6_pcie_pm_ops,
+ .pm = &imx_pcie_pm_ops,
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
- .probe = imx6_pcie_probe,
- .shutdown = imx6_pcie_shutdown,
+ .probe = imx_pcie_probe,
+ .shutdown = imx_pcie_shutdown,
};
-static void imx6_pcie_quirk(struct pci_dev *dev)
+static void imx_pcie_quirk(struct pci_dev *dev)
{
struct pci_bus *bus = dev->bus;
struct dw_pcie_rp *pp = bus->sysdata;
@@ -1378,30 +2013,37 @@ static void imx6_pcie_quirk(struct pci_dev *dev)
return;
/* Make sure we only quirk devices associated with this driver */
- if (bus->dev.parent->parent->driver != &imx6_pcie_driver.driver)
+ if (bus->dev.parent->parent->driver != &imx_pcie_driver.driver)
return;
if (pci_is_root_bus(bus)) {
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
+ struct imx_pcie *imx_pcie = to_imx_pcie(pci);
/*
* Limit config length to avoid the kernel reading beyond
* the register set and causing an abort on i.MX 6Quad
*/
- if (imx6_pcie->drvdata->dbi_length) {
- dev->cfg_size = imx6_pcie->drvdata->dbi_length;
+ if (imx_pcie->drvdata->dbi_length) {
+ dev->cfg_size = imx_pcie->drvdata->dbi_length;
dev_info(&dev->dev, "Limiting cfg_size to %d\n",
dev->cfg_size);
}
}
}
DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_SYNOPSYS, 0xabcd,
- PCI_CLASS_BRIDGE_PCI, 8, imx6_pcie_quirk);
+ PCI_CLASS_BRIDGE_PCI, 8, imx_pcie_quirk);
-static int __init imx6_pcie_init(void)
+static int __init imx_pcie_init(void)
{
#ifdef CONFIG_ARM
+ struct device_node *np;
+
+ np = of_find_matching_node(NULL, imx_pcie_of_match);
+ if (!np)
+ return -ENODEV;
+ of_node_put(np);
+
/*
* Since probe() can be deferred we need to make sure that
* hook_fault_code is not called after __init memory is freed
@@ -1413,6 +2055,6 @@ static int __init imx6_pcie_init(void)
"external abort on non-linefetch");
#endif
- return platform_driver_register(&imx6_pcie_driver);
+ return platform_driver_register(&imx_pcie_driver);
}
-device_initcall(imx6_pcie_init);
+device_initcall(imx_pcie_init);
diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
index 78818853af9e..f86d9111f863 100644
--- a/drivers/pci/controller/dwc/pci-keystone.c
+++ b/drivers/pci/controller/dwc/pci-keystone.c
@@ -17,9 +17,9 @@
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/mfd/syscon.h>
+#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/phy/phy.h>
@@ -35,6 +35,11 @@
#define PCIE_DEVICEID_SHIFT 16
/* Application registers */
+#define PID 0x000
+#define RTL GENMASK(15, 11)
+#define RTL_SHIFT 11
+#define AM6_PCI_PG1_RTL_VER 0x15
+
#define CMD_STATUS 0x004
#define LTSSM_EN_VAL BIT(0)
#define OB_XLAT_EN_VAL BIT(1)
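/*
 * Illustrative sketch (not part of the patch): decoding the RTL revision
 * from the PID application register with the masks defined above, e.g.
 * to detect AM654x PG1 silicon. A hypothetical helper:
 */
static bool example_is_am6_pg1(struct keystone_pcie *ks_pcie)
{
	u32 val = ks_pcie_app_readl(ks_pcie, PID);

	return ((val & RTL) >> RTL_SHIFT) == AM6_PCI_PG1_RTL_VER;
}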
@@ -105,6 +110,8 @@
#define to_keystone_pcie(x) dev_get_drvdata((x)->dev)
+#define PCI_DEVICE_ID_TI_AM654X 0xb00c
+
struct ks_pcie_of_data {
enum dw_pcie_device_mode mode;
const struct dw_pcie_host_ops *host_ops;
@@ -116,8 +123,7 @@ struct keystone_pcie {
struct dw_pcie *pci;
/* PCI Device ID */
u32 device_id;
- int legacy_host_irqs[PCI_NUM_INTX];
- struct device_node *legacy_intc_np;
+ int intx_host_irqs[PCI_NUM_INTX];
int msi_host_irq;
int num_lanes;
@@ -125,7 +131,7 @@ struct keystone_pcie {
struct phy **phy;
struct device_link **link;
struct device_node *msi_intc_np;
- struct irq_domain *legacy_irq_domain;
+ struct irq_domain *intx_irq_domain;
struct device_node *np;
/* Application register space */
@@ -184,12 +190,6 @@ static void ks_pcie_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
(int)data->hwirq, msg->address_hi, msg->address_lo);
}
-static int ks_pcie_msi_set_affinity(struct irq_data *irq_data,
- const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
static void ks_pcie_msi_mask(struct irq_data *data)
{
struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(data);
@@ -242,19 +242,78 @@ static struct irq_chip ks_pcie_msi_irq_chip = {
.name = "KEYSTONE-PCI-MSI",
.irq_ack = ks_pcie_msi_irq_ack,
.irq_compose_msi_msg = ks_pcie_compose_msi_msg,
- .irq_set_affinity = ks_pcie_msi_set_affinity,
.irq_mask = ks_pcie_msi_mask,
.irq_unmask = ks_pcie_msi_unmask,
};
+/**
+ * ks_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask registers
+ * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone
+ * PCIe host controller driver information.
+ *
+ * Since modification of dbi_cs2 involves a different clock domain, read the
+ * status back to ensure the transition is complete.
+ */
+static void ks_pcie_set_dbi_mode(struct keystone_pcie *ks_pcie)
+{
+ u32 val;
+
+ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+ val |= DBI_CS2;
+ ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
+
+ do {
+ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+ } while (!(val & DBI_CS2));
+}
+
+/**
+ * ks_pcie_clear_dbi_mode() - Disable DBI mode
+ * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone
+ * PCIe host controller driver information.
+ *
+ * Since modification of dbi_cs2 involves a different clock domain, read the
+ * status back to ensure the transition is complete.
+ */
+static void ks_pcie_clear_dbi_mode(struct keystone_pcie *ks_pcie)
+{
+ u32 val;
+
+ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+ val &= ~DBI_CS2;
+ ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
+
+ do {
+ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+ } while (val & DBI_CS2);
+}
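/*
 * Illustrative sketch (not part of the patch): the unbounded do/while
 * polls above could equally be written with a bounded helper from
 * <linux/iopoll.h>; a hypothetical variant with a 1 ms timeout:
 */
static int example_clear_dbi_mode_bounded(struct keystone_pcie *ks_pcie)
{
	u32 val;

	val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
	ks_pcie_app_writel(ks_pcie, CMD_STATUS, val & ~DBI_CS2);

	/* Busy-poll until DBI_CS2 clears, or give up after 1000 us */
	return read_poll_timeout(ks_pcie_app_readl, val, !(val & DBI_CS2),
				 0, 1000, false, ks_pcie, CMD_STATUS);
}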
+
static int ks_pcie_msi_host_init(struct dw_pcie_rp *pp)
{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+
+ /* Configure and set up BAR0 */
+ ks_pcie_set_dbi_mode(ks_pcie);
+
+ /* Enable BAR0 */
+ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 1);
+ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, SZ_4K - 1);
+
+ ks_pcie_clear_dbi_mode(ks_pcie);
+
+ /*
+ * For BAR0, just setting bus address for inbound writes (MSI) should
+ * be sufficient. Use physical address to avoid any conflicts.
+ */
+ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, ks_pcie->app.start);
+
pp->msi_irq_chip = &ks_pcie_msi_irq_chip;
return dw_pcie_allocate_domains(pp);
}
-static void ks_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie,
- int offset)
+static void ks_pcie_handle_intx_irq(struct keystone_pcie *ks_pcie,
+ int offset)
{
struct dw_pcie *pci = ks_pcie->pci;
struct device *dev = pci->dev;
@@ -264,7 +323,7 @@ static void ks_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie,
if (BIT(0) & pending) {
dev_dbg(dev, ": irq: irq_offset %d", offset);
- generic_handle_domain_irq(ks_pcie->legacy_irq_domain, offset);
+ generic_handle_domain_irq(ks_pcie->intx_irq_domain, offset);
}
/* EOI the INTx interrupt */
@@ -308,94 +367,56 @@ static irqreturn_t ks_pcie_handle_error_irq(struct keystone_pcie *ks_pcie)
return IRQ_HANDLED;
}
-static void ks_pcie_ack_legacy_irq(struct irq_data *d)
+static void ks_pcie_ack_intx_irq(struct irq_data *d)
{
}
-static void ks_pcie_mask_legacy_irq(struct irq_data *d)
+static void ks_pcie_mask_intx_irq(struct irq_data *d)
{
}
-static void ks_pcie_unmask_legacy_irq(struct irq_data *d)
+static void ks_pcie_unmask_intx_irq(struct irq_data *d)
{
}
-static struct irq_chip ks_pcie_legacy_irq_chip = {
- .name = "Keystone-PCI-Legacy-IRQ",
- .irq_ack = ks_pcie_ack_legacy_irq,
- .irq_mask = ks_pcie_mask_legacy_irq,
- .irq_unmask = ks_pcie_unmask_legacy_irq,
+static struct irq_chip ks_pcie_intx_irq_chip = {
+ .name = "Keystone-PCI-INTX-IRQ",
+ .irq_ack = ks_pcie_ack_intx_irq,
+ .irq_mask = ks_pcie_mask_intx_irq,
+ .irq_unmask = ks_pcie_unmask_intx_irq,
};
-static int ks_pcie_init_legacy_irq_map(struct irq_domain *d,
- unsigned int irq,
- irq_hw_number_t hw_irq)
+static int ks_pcie_init_intx_irq_map(struct irq_domain *d,
+ unsigned int irq, irq_hw_number_t hw_irq)
{
- irq_set_chip_and_handler(irq, &ks_pcie_legacy_irq_chip,
+ irq_set_chip_and_handler(irq, &ks_pcie_intx_irq_chip,
handle_level_irq);
irq_set_chip_data(irq, d->host_data);
return 0;
}
-static const struct irq_domain_ops ks_pcie_legacy_irq_domain_ops = {
- .map = ks_pcie_init_legacy_irq_map,
+static const struct irq_domain_ops ks_pcie_intx_irq_domain_ops = {
+ .map = ks_pcie_init_intx_irq_map,
.xlate = irq_domain_xlate_onetwocell,
};
-/**
- * ks_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask registers
- * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone
- * PCIe host controller driver information.
- *
- * Since modification of dbi_cs2 involves different clock domain, read the
- * status back to ensure the transition is complete.
- */
-static void ks_pcie_set_dbi_mode(struct keystone_pcie *ks_pcie)
-{
- u32 val;
-
- val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
- val |= DBI_CS2;
- ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
-
- do {
- val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
- } while (!(val & DBI_CS2));
-}
-
-/**
- * ks_pcie_clear_dbi_mode() - Disable DBI mode
- * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone
- * PCIe host controller driver information.
- *
- * Since modification of dbi_cs2 involves different clock domain, read the
- * status back to ensure the transition is complete.
- */
-static void ks_pcie_clear_dbi_mode(struct keystone_pcie *ks_pcie)
-{
- u32 val;
-
- val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
- val &= ~DBI_CS2;
- ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
-
- do {
- val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
- } while (val & DBI_CS2);
-}
-
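
As the kernel-doc above notes, DBI_CS2 sits in a different clock domain, so both helpers poll the readback until the new value becomes visible. The open-coded do/while loops spin forever if the bit never latches; a bounded variant of the same idiom, sketched here with readl_poll_timeout() and illustrative names, is the more defensive shape:

    #include <linux/io.h>
    #include <linux/iopoll.h>

    /* Hedged sketch; 'base', 'off' and 'bit' are illustrative. */
    static int toggle_mode_bit(void __iomem *base, u32 off, u32 bit, bool set)
    {
            u32 val = readl(base + off);

            val = set ? (val | bit) : (val & ~bit);
            writel(val, base + off);

            /* Poll the readback; give up after 100us instead of spinning. */
            return readl_poll_timeout(base + off, val,
                                      set ? (val & bit) : !(val & bit), 1, 100);
    }
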
-static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
+static int ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
{
u32 val;
u32 num_viewport = ks_pcie->num_viewport;
struct dw_pcie *pci = ks_pcie->pci;
struct dw_pcie_rp *pp = &pci->pp;
- u64 start, end;
+ struct resource_entry *entry;
struct resource *mem;
+ u64 start, end;
int i;
- mem = resource_list_first_type(&pp->bridge->windows, IORESOURCE_MEM)->res;
+ entry = resource_list_first_type(&pp->bridge->windows, IORESOURCE_MEM);
+ if (!entry)
+ return -ENODEV;
+
+ mem = entry->res;
start = mem->start;
end = mem->end;
@@ -406,7 +427,7 @@ static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
ks_pcie_clear_dbi_mode(ks_pcie);
if (ks_pcie->is_am6)
- return;
+ return 0;
val = ilog2(OB_WIN_SIZE);
ks_pcie_app_writel(ks_pcie, OB_SIZE, val);
@@ -423,6 +444,8 @@ static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
val |= OB_XLAT_EN_VAL;
ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
+
+ return 0;
}
static void __iomem *ks_pcie_other_map_bus(struct pci_bus *bus,
@@ -433,6 +456,17 @@ static void __iomem *ks_pcie_other_map_bus(struct pci_bus *bus,
struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
u32 reg;
+ /*
+ * Checking whether the link is up here is a last line of defense
+ * against platforms that forward errors on the system bus as
+ * SError upon PCI configuration transactions issued when the link
+ * is down. This check is racy by definition and does not stop
+ * the system from triggering an SError if the link goes down
+ * after this check is performed.
+ */
+ if (!dw_pcie_link_up(pci))
+ return NULL;
+
reg = CFG_BUS(bus->number) | CFG_DEVICE(PCI_SLOT(devfn)) |
CFG_FUNC(PCI_FUNC(devfn));
if (!pci_is_root_bus(bus->parent))
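
Returning NULL from .map_bus is what turns the racy link check into a graceful failure: pci_generic_config_read() treats a NULL mapping as an absent device and fabricates an all-ones completion instead of touching the bus. A simplified sketch of that contract (a paraphrase, not a verbatim copy of drivers/pci/access.c):

    #include <linux/pci.h>

    static int generic_config_read_sketch(struct pci_bus *bus, unsigned int devfn,
                                          int where, int size, u32 *val)
    {
            void __iomem *addr = bus->ops->map_bus(bus, devfn, where);

            if (!addr) {
                    *val = ~0;      /* PCI_ERROR_RESPONSE */
                    return PCIBIOS_DEVICE_NOT_FOUND;
            }

            if (size == 1)
                    *val = readb(addr);
            else if (size == 2)
                    *val = readw(addr);
            else
                    *val = readl(addr);

            return PCIBIOS_SUCCESSFUL;
    }
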
@@ -448,44 +482,10 @@ static struct pci_ops ks_child_pcie_ops = {
.write = pci_generic_config_write,
};
-/**
- * ks_pcie_v3_65_add_bus() - keystone add_bus post initialization
- * @bus: A pointer to the PCI bus structure.
- *
- * This sets BAR0 to enable inbound access for MSI_IRQ register
- */
-static int ks_pcie_v3_65_add_bus(struct pci_bus *bus)
-{
- struct dw_pcie_rp *pp = bus->sysdata;
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
-
- if (!pci_is_root_bus(bus))
- return 0;
-
- /* Configure and set up BAR0 */
- ks_pcie_set_dbi_mode(ks_pcie);
-
- /* Enable BAR0 */
- dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 1);
- dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, SZ_4K - 1);
-
- ks_pcie_clear_dbi_mode(ks_pcie);
-
- /*
- * For BAR0, just setting bus address for inbound writes (MSI) should
- * be sufficient. Use physical address to avoid any conflicts.
- */
- dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, ks_pcie->app.start);
-
- return 0;
-}
-
static struct pci_ops ks_pcie_ops = {
.map_bus = dw_pcie_own_conf_map_bus,
.read = pci_generic_config_read,
.write = pci_generic_config_write,
- .add_bus = ks_pcie_v3_65_add_bus,
};
/**
@@ -493,13 +493,12 @@ static struct pci_ops ks_pcie_ops = {
* @pci: A pointer to the dw_pcie structure which holds the DesignWare PCIe host
* controller driver information.
*/
-static int ks_pcie_link_up(struct dw_pcie *pci)
+static bool ks_pcie_link_up(struct dw_pcie *pci)
{
u32 val;
val = dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0);
- val &= PORT_LOGIC_LTSSM_STATE_MASK;
- return (val == PORT_LOGIC_LTSSM_STATE_L0);
+ return (val & PORT_LOGIC_LTSSM_STATE_MASK) == PORT_LOGIC_LTSSM_STATE_L0;
}
static void ks_pcie_stop_link(struct dw_pcie *pci)
@@ -528,7 +527,11 @@ static int ks_pcie_start_link(struct dw_pcie *pci)
static void ks_pcie_quirk(struct pci_dev *dev)
{
struct pci_bus *bus = dev->bus;
+ struct keystone_pcie *ks_pcie;
+ struct device *bridge_dev;
struct pci_dev *bridge;
+ u32 val;
+
static const struct pci_device_id rc_pci_devids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2HK),
.class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, },
@@ -540,6 +543,11 @@ static void ks_pcie_quirk(struct pci_dev *dev)
.class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, },
{ 0, },
};
+ static const struct pci_device_id am6_pci_devids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654X),
+ .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
+ { 0, },
+ };
if (pci_is_root_bus(bus))
bridge = dev;
@@ -561,10 +569,36 @@ static void ks_pcie_quirk(struct pci_dev *dev)
*/
if (pci_match_id(rc_pci_devids, bridge)) {
if (pcie_get_readrq(dev) > 256) {
- dev_info(&dev->dev, "limiting MRRS to 256\n");
+ dev_info(&dev->dev, "limiting MRRS to 256 bytes\n");
pcie_set_readrq(dev, 256);
}
}
+
+ /*
+ * Memory transactions fail with PCI controller in AM654 PG1.0
+ * when MRRS is set to more than 128 bytes. Force the MRRS to
+ * 128 bytes in all downstream devices.
+ */
+ if (pci_match_id(am6_pci_devids, bridge)) {
+ bridge_dev = pci_get_host_bridge_device(dev);
+ if (!bridge_dev || !bridge_dev->parent)
+ return;
+
+ ks_pcie = dev_get_drvdata(bridge_dev->parent);
+ if (!ks_pcie)
+ return;
+
+ val = ks_pcie_app_readl(ks_pcie, PID);
+ val &= RTL;
+ val >>= RTL_SHIFT;
+ if (val != AM6_PCI_PG1_RTL_VER)
+ return;
+
+ if (pcie_get_readrq(dev) > 128) {
+ dev_info(&dev->dev, "limiting MRRS to 128 bytes\n");
+ pcie_set_readrq(dev, 128);
+ }
+ }
}
DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, ks_pcie_quirk);
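
The AM654 branch gates the 128-byte clamp on silicon revision: it reads the controller's PID register and isolates the RTL field with a mask-and-shift pair (RTL, RTL_SHIFT). The same extraction collapses to one step with FIELD_GET(); a hedged sketch, assuming the driver's RTL macro is a contiguous GENMASK-style mask:

    #include <linux/bitfield.h>

    static bool ks_pcie_is_am654_pg1_sketch(struct keystone_pcie *ks_pcie)
    {
            u32 pid = ks_pcie_app_readl(ks_pcie, PID);

            /* FIELD_GET() masks and right-shifts in a single operation. */
            return FIELD_GET(RTL, pid) == AM6_PCI_PG1_RTL_VER;
    }
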
@@ -606,22 +640,22 @@ static void ks_pcie_msi_irq_handler(struct irq_desc *desc)
}
/**
- * ks_pcie_legacy_irq_handler() - Handle legacy interrupt
+ * ks_pcie_intx_irq_handler() - Handle INTX interrupt
* @desc: Pointer to irq descriptor
*
- * Traverse through pending legacy interrupts and invoke handler for each. Also
+ * Traverse through pending INTX interrupts and invoke handler for each. Also
* takes care of interrupt controller level mask/ack operation.
*/
-static void ks_pcie_legacy_irq_handler(struct irq_desc *desc)
+static void ks_pcie_intx_irq_handler(struct irq_desc *desc)
{
unsigned int irq = irq_desc_get_irq(desc);
struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
struct dw_pcie *pci = ks_pcie->pci;
struct device *dev = pci->dev;
- u32 irq_offset = irq - ks_pcie->legacy_host_irqs[0];
+ u32 irq_offset = irq - ks_pcie->intx_host_irqs[0];
struct irq_chip *chip = irq_desc_get_chip(desc);
- dev_dbg(dev, ": Handling legacy irq %d\n", irq);
+ dev_dbg(dev, ": Handling INTX irq %d\n", irq);
/*
* The chained irq handler installation would have replaced normal
@@ -629,7 +663,7 @@ static void ks_pcie_legacy_irq_handler(struct irq_desc *desc)
* ack operation.
*/
chained_irq_enter(chip, desc);
- ks_pcie_handle_legacy_irq(ks_pcie, irq_offset);
+ ks_pcie_handle_intx_irq(ks_pcie, irq_offset);
chained_irq_exit(chip, desc);
}
@@ -687,10 +721,10 @@ err:
return ret;
}
-static int ks_pcie_config_legacy_irq(struct keystone_pcie *ks_pcie)
+static int ks_pcie_config_intx_irq(struct keystone_pcie *ks_pcie)
{
struct device *dev = ks_pcie->pci->dev;
- struct irq_domain *legacy_irq_domain;
+ struct irq_domain *intx_irq_domain;
struct device_node *np = ks_pcie->np;
struct device_node *intc_np;
int irq_count, irq, ret = 0, i;
@@ -698,7 +732,7 @@ static int ks_pcie_config_legacy_irq(struct keystone_pcie *ks_pcie)
intc_np = of_get_child_by_name(np, "legacy-interrupt-controller");
if (!intc_np) {
/*
- * Since legacy interrupts are modeled as edge-interrupts in
+ * Since INTX interrupts are modeled as edge interrupts in
* AM6, keep it disabled for now.
*/
if (ks_pcie->is_am6)
@@ -720,22 +754,21 @@ static int ks_pcie_config_legacy_irq(struct keystone_pcie *ks_pcie)
ret = -EINVAL;
goto err;
}
- ks_pcie->legacy_host_irqs[i] = irq;
+ ks_pcie->intx_host_irqs[i] = irq;
irq_set_chained_handler_and_data(irq,
- ks_pcie_legacy_irq_handler,
+ ks_pcie_intx_irq_handler,
ks_pcie);
}
- legacy_irq_domain =
- irq_domain_add_linear(intc_np, PCI_NUM_INTX,
- &ks_pcie_legacy_irq_domain_ops, NULL);
- if (!legacy_irq_domain) {
- dev_err(dev, "Failed to add irq domain for legacy irqs\n");
+ intx_irq_domain = irq_domain_create_linear(of_fwnode_handle(intc_np), PCI_NUM_INTX,
+ &ks_pcie_intx_irq_domain_ops, NULL);
+ if (!intx_irq_domain) {
+ dev_err(dev, "Failed to add irq domain for INTX irqs\n");
ret = -EINVAL;
goto err;
}
- ks_pcie->legacy_irq_domain = legacy_irq_domain;
+ ks_pcie->intx_irq_domain = intx_irq_domain;
for (i = 0; i < PCI_NUM_INTX; i++)
ks_pcie_app_writel(ks_pcie, IRQ_ENABLE_SET(i), INTx_EN);
@@ -745,29 +778,7 @@ err:
return ret;
}
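
The hunk above also converts irq_domain_add_linear() to irq_domain_create_linear(), whose first argument is a struct fwnode_handle rather than a device_node; of_fwnode_handle() performs the conversion. A minimal sketch of the new shape:

    #include <linux/irqdomain.h>
    #include <linux/of.h>
    #include <linux/pci.h>

    static struct irq_domain *
    create_intx_domain_sketch(struct device_node *np,
                              const struct irq_domain_ops *ops, void *host_data)
    {
            /* The create variant takes a fwnode, not a device_node. */
            return irq_domain_create_linear(of_fwnode_handle(np), PCI_NUM_INTX,
                                            ops, host_data);
    }
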
-#ifdef CONFIG_ARM
-/*
- * When a PCI device does not exist during config cycles, keystone host
- * gets a bus error instead of returning 0xffffffff (PCI_ERROR_RESPONSE).
- * This handler always returns 0 for this kind of fault.
- */
-static int ks_pcie_fault(unsigned long addr, unsigned int fsr,
- struct pt_regs *regs)
-{
- unsigned long instr = *(unsigned long *) instruction_pointer(regs);
-
- if ((instr & 0x0e100090) == 0x00100090) {
- int reg = (instr >> 12) & 15;
-
- regs->uregs[reg] = -1;
- regs->ARM_pc += 4;
- }
-
- return 0;
-}
-#endif
-
-static int __init ks_pcie_init_id(struct keystone_pcie *ks_pcie)
+static int ks_pcie_init_id(struct keystone_pcie *ks_pcie)
{
int ret;
unsigned int id;
@@ -799,7 +810,7 @@ static int __init ks_pcie_init_id(struct keystone_pcie *ks_pcie)
return 0;
}
-static int __init ks_pcie_host_init(struct dw_pcie_rp *pp)
+static int ks_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
@@ -809,7 +820,7 @@ static int __init ks_pcie_host_init(struct dw_pcie_rp *pp)
if (!ks_pcie->is_am6)
pp->bridge->child_ops = &ks_child_pcie_ops;
- ret = ks_pcie_config_legacy_irq(ks_pcie);
+ ret = ks_pcie_config_intx_irq(ks_pcie);
if (ret)
return ret;
@@ -818,7 +829,10 @@ static int __init ks_pcie_host_init(struct dw_pcie_rp *pp)
return ret;
ks_pcie_stop_link(pci);
- ks_pcie_setup_rc_app_regs(ks_pcie);
+ ret = ks_pcie_setup_rc_app_regs(ks_pcie);
+ if (ret)
+ return ret;
+
writew(PCI_IO_RANGE_TYPE_32 | (PCI_IO_RANGE_TYPE_32 << 8),
pci->dbi_base + PCI_IO_BASE);
@@ -826,25 +840,16 @@ static int __init ks_pcie_host_init(struct dw_pcie_rp *pp)
if (ret < 0)
return ret;
-#ifdef CONFIG_ARM
- /*
- * PCIe access errors that result into OCP errors are caught by ARM as
- * "External aborts"
- */
- hook_fault_code(17, ks_pcie_fault, SIGBUS, 0,
- "Asynchronous external abort");
-#endif
-
return 0;
}
static const struct dw_pcie_host_ops ks_pcie_host_ops = {
- .host_init = ks_pcie_host_init,
- .msi_host_init = ks_pcie_msi_host_init,
+ .init = ks_pcie_host_init,
+ .msi_init = ks_pcie_msi_host_init,
};
static const struct dw_pcie_host_ops ks_pcie_am654_host_ops = {
- .host_init = ks_pcie_host_init,
+ .init = ks_pcie_host_init,
};
static irqreturn_t ks_pcie_err_irq_handler(int irq, void *priv)
@@ -882,7 +887,7 @@ static void ks_pcie_am654_ep_init(struct dw_pcie_ep *ep)
dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, flags);
}
-static void ks_pcie_am654_raise_legacy_irq(struct keystone_pcie *ks_pcie)
+static void ks_pcie_am654_raise_intx_irq(struct keystone_pcie *ks_pcie)
{
struct dw_pcie *pci = ks_pcie->pci;
u8 int_pin;
@@ -901,20 +906,19 @@ static void ks_pcie_am654_raise_legacy_irq(struct keystone_pcie *ks_pcie)
}
static int ks_pcie_am654_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
- enum pci_epc_irq_type type,
- u16 interrupt_num)
+ unsigned int type, u16 interrupt_num)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
switch (type) {
- case PCI_EPC_IRQ_LEGACY:
- ks_pcie_am654_raise_legacy_irq(ks_pcie);
+ case PCI_IRQ_INTX:
+ ks_pcie_am654_raise_intx_irq(ks_pcie);
break;
- case PCI_EPC_IRQ_MSI:
+ case PCI_IRQ_MSI:
dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
break;
- case PCI_EPC_IRQ_MSIX:
+ case PCI_IRQ_MSIX:
dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
break;
default:
@@ -926,16 +930,15 @@ static int ks_pcie_am654_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
}
static const struct pci_epc_features ks_pcie_am654_epc_features = {
- .linkup_notifier = false,
.msi_capable = true,
.msix_capable = true,
- .reserved_bar = 1 << BAR_0 | 1 << BAR_1,
- .bar_fixed_64bit = 1 << BAR_0,
- .bar_fixed_size[2] = SZ_1M,
- .bar_fixed_size[3] = SZ_64K,
- .bar_fixed_size[4] = 256,
- .bar_fixed_size[5] = SZ_1M,
- .align = SZ_1M,
+ .bar[BAR_0] = { .type = BAR_RESERVED, },
+ .bar[BAR_1] = { .type = BAR_RESERVED, },
+ .bar[BAR_2] = { .type = BAR_RESIZABLE, },
+ .bar[BAR_3] = { .type = BAR_FIXED, .fixed_size = SZ_64K, },
+ .bar[BAR_4] = { .type = BAR_FIXED, .fixed_size = 256, },
+ .bar[BAR_5] = { .type = BAR_RESIZABLE, },
+ .align = SZ_64K,
};
static const struct pci_epc_features*
@@ -945,7 +948,7 @@ ks_pcie_am654_get_features(struct dw_pcie_ep *ep)
}
static const struct dw_pcie_ep_ops ks_pcie_am654_ep_ops = {
- .ep_init = ks_pcie_am654_ep_init,
+ .init = ks_pcie_am654_ep_init,
.raise_irq = ks_pcie_am654_raise_irq,
.get_features = &ks_pcie_am654_get_features,
};
@@ -1069,6 +1072,7 @@ static int ks_pcie_am654_set_mode(struct device *dev,
static const struct ks_pcie_of_data ks_pcie_rc_of_data = {
.host_ops = &ks_pcie_host_ops,
+ .mode = DW_PCIE_RC_TYPE,
.version = DW_PCIE_VER_365A,
};
@@ -1100,8 +1104,9 @@ static const struct of_device_id ks_pcie_of_match[] = {
},
{ },
};
+MODULE_DEVICE_TABLE(of, ks_pcie_of_match);
-static int __init ks_pcie_probe(struct platform_device *pdev)
+static int ks_pcie_probe(struct platform_device *pdev)
{
const struct dw_pcie_host_ops *host_ops;
const struct dw_pcie_ep_ops *ep_ops;
@@ -1166,8 +1171,8 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
- ret = request_irq(irq, ks_pcie_err_irq_handler, IRQF_SHARED,
- "ks-pcie-error-irq", ks_pcie);
+ ret = devm_request_irq(dev, irq, ks_pcie_err_irq_handler, IRQF_SHARED,
+ "ks-pcie-error-irq", ks_pcie);
if (ret < 0) {
dev_err(dev, "failed to request error IRQ %d\n",
irq);
@@ -1178,11 +1183,11 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
if (ret)
num_lanes = 1;
- phy = devm_kzalloc(dev, sizeof(*phy) * num_lanes, GFP_KERNEL);
+ phy = devm_kcalloc(dev, num_lanes, sizeof(*phy), GFP_KERNEL);
if (!phy)
return -ENOMEM;
- link = devm_kzalloc(dev, sizeof(*link) * num_lanes, GFP_KERNEL);
+ link = devm_kcalloc(dev, num_lanes, sizeof(*link), GFP_KERNEL);
if (!link)
return -ENOMEM;
@@ -1219,7 +1224,16 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
goto err_link;
}
+ /* Obtain references to the PHYs */
+ for (i = 0; i < num_lanes; i++)
+ phy_pm_runtime_get_sync(ks_pcie->phy[i]);
+
ret = ks_pcie_enable_phy(ks_pcie);
+
+ /* Release references to the PHYs */
+ for (i = 0; i < num_lanes; i++)
+ phy_pm_runtime_put_sync(ks_pcie->phy[i]);
+
if (ret) {
dev_err(dev, "failed to enable phy\n");
goto err_link;
@@ -1282,15 +1296,28 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
ret = dw_pcie_ep_init(&pci->ep);
if (ret < 0)
goto err_get_sync;
+
+ ret = dw_pcie_ep_init_registers(&pci->ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DWC endpoint registers\n");
+ goto err_ep_init;
+ }
+
+ pci_epc_init_notify(pci->ep.epc);
+
break;
default:
dev_err(dev, "INVALID device type %d\n", mode);
+ ret = -EINVAL;
+ goto err_get_sync;
}
ks_pcie_enable_error_irq(ks_pcie);
return 0;
+err_ep_init:
+ dw_pcie_ep_deinit(&pci->ep);
err_get_sync:
pm_runtime_put(dev);
pm_runtime_disable(dev);
@@ -1303,7 +1330,7 @@ err_link:
return ret;
}
-static int __exit ks_pcie_remove(struct platform_device *pdev)
+static void ks_pcie_remove(struct platform_device *pdev)
{
struct keystone_pcie *ks_pcie = platform_get_drvdata(pdev);
struct device_link **link = ks_pcie->link;
@@ -1315,16 +1342,55 @@ static int __exit ks_pcie_remove(struct platform_device *pdev)
ks_pcie_disable_phy(ks_pcie);
while (num_lanes--)
device_link_del(link[num_lanes]);
-
- return 0;
}
-static struct platform_driver ks_pcie_driver __refdata = {
+static struct platform_driver ks_pcie_driver = {
.probe = ks_pcie_probe,
- .remove = __exit_p(ks_pcie_remove),
+ .remove = ks_pcie_remove,
.driver = {
.name = "keystone-pcie",
.of_match_table = ks_pcie_of_match,
},
};
+
+#ifdef CONFIG_ARM
+/*
+ * When a PCI device does not exist during config cycles, keystone host
+ * gets a bus error instead of returning 0xffffffff (PCI_ERROR_RESPONSE).
+ * This handler always returns 0 for this kind of fault.
+ */
+static int ks_pcie_fault(unsigned long addr, unsigned int fsr,
+ struct pt_regs *regs)
+{
+ unsigned long instr = *(unsigned long *)instruction_pointer(regs);
+
+ if ((instr & 0x0e100090) == 0x00100090) {
+ int reg = (instr >> 12) & 15;
+
+ regs->uregs[reg] = -1;
+ regs->ARM_pc += 4;
+ }
+
+ return 0;
+}
+
+static int __init ks_pcie_init(void)
+{
+ /*
+ * PCIe access errors that result into OCP errors are caught by ARM as
+ * "External aborts"
+ */
+ if (of_find_matching_node(NULL, ks_pcie_of_match))
+ hook_fault_code(17, ks_pcie_fault, SIGBUS, 0,
+ "Asynchronous external abort");
+
+ return platform_driver_register(&ks_pcie_driver);
+}
+device_initcall(ks_pcie_init);
+#else
builtin_platform_driver(ks_pcie_driver);
+#endif
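
The fixup only fires for the encodings the mask selects: (instr & 0x0e100090) == 0x00100090 matches the ARM "extra load/store" group with the L (load) bit set, and bits [15:12] of those encodings hold the destination register. A stand-alone sketch of the test, with one example opcode worked out by hand:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool matches_faultable_load(uint32_t instr, int *rt)
    {
            if ((instr & 0x0e100090) != 0x00100090)
                    return false;

            *rt = (instr >> 12) & 15;       /* bits [15:12]: destination Rt */
            return true;
    }

    int main(void)
    {
            int rt;

            /* "ldrh r3, [r0]" encodes as 0xe1d030b0 */
            if (matches_faultable_load(0xe1d030b0, &rt))
                    printf("load into r%d\n", rt);  /* prints "load into r3" */

            return 0;
    }
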
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("PCIe controller driver for Texas Instruments Keystone SoCs");
+MODULE_AUTHOR("Murali Karicheri <m-karicheri2@ti.com>");
diff --git a/drivers/pci/controller/dwc/pci-layerscape-ep.c b/drivers/pci/controller/dwc/pci-layerscape-ep.c
index ad99707b3b99..a4a800699f89 100644
--- a/drivers/pci/controller/dwc/pci-layerscape-ep.c
+++ b/drivers/pci/controller/dwc/pci-layerscape-ep.c
@@ -18,6 +18,20 @@
#include "pcie-designware.h"
+#define PEX_PF0_CONFIG 0xC0014
+#define PEX_PF0_CFG_READY BIT(0)
+
+/* PEX PFa PCIE PME and message interrupt registers */
+#define PEX_PF0_PME_MES_DR 0xC0020
+#define PEX_PF0_PME_MES_DR_LUD BIT(7)
+#define PEX_PF0_PME_MES_DR_LDD BIT(9)
+#define PEX_PF0_PME_MES_DR_HRD BIT(10)
+
+#define PEX_PF0_PME_MES_IER 0xC0028
+#define PEX_PF0_PME_MES_IER_LUDIE BIT(7)
+#define PEX_PF0_PME_MES_IER_LDDIE BIT(9)
+#define PEX_PF0_PME_MES_IER_HRDIE BIT(10)
+
#define to_ls_pcie_ep(x) dev_get_drvdata((x)->dev)
struct ls_pcie_ep_drvdata {
@@ -30,8 +44,100 @@ struct ls_pcie_ep {
struct dw_pcie *pci;
struct pci_epc_features *ls_epc;
const struct ls_pcie_ep_drvdata *drvdata;
+ int irq;
+ u32 lnkcap;
+ bool big_endian;
};
+static u32 ls_pcie_pf_lut_readl(struct ls_pcie_ep *pcie, u32 offset)
+{
+ struct dw_pcie *pci = pcie->pci;
+
+ if (pcie->big_endian)
+ return ioread32be(pci->dbi_base + offset);
+ else
+ return ioread32(pci->dbi_base + offset);
+}
+
+static void ls_pcie_pf_lut_writel(struct ls_pcie_ep *pcie, u32 offset, u32 value)
+{
+ struct dw_pcie *pci = pcie->pci;
+
+ if (pcie->big_endian)
+ iowrite32be(value, pci->dbi_base + offset);
+ else
+ iowrite32(value, pci->dbi_base + offset);
+}
+
+static irqreturn_t ls_pcie_ep_event_handler(int irq, void *dev_id)
+{
+ struct ls_pcie_ep *pcie = dev_id;
+ struct dw_pcie *pci = pcie->pci;
+ u32 val, cfg;
+ u8 offset;
+
+ val = ls_pcie_pf_lut_readl(pcie, PEX_PF0_PME_MES_DR);
+ ls_pcie_pf_lut_writel(pcie, PEX_PF0_PME_MES_DR, val);
+
+ if (!val)
+ return IRQ_NONE;
+
+ if (val & PEX_PF0_PME_MES_DR_LUD) {
+
+ offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+
+ /*
+ * The values of the Maximum Link Width and Supported Link
+ * Speed from the Link Capabilities Register will be lost
+ * during link down or hot reset. Restore the initial values
+ * configured by the Reset Configuration Word (RCW).
+ */
+ dw_pcie_dbi_ro_wr_en(pci);
+ dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, pcie->lnkcap);
+ dw_pcie_dbi_ro_wr_dis(pci);
+
+ cfg = ls_pcie_pf_lut_readl(pcie, PEX_PF0_CONFIG);
+ cfg |= PEX_PF0_CFG_READY;
+ ls_pcie_pf_lut_writel(pcie, PEX_PF0_CONFIG, cfg);
+ dw_pcie_ep_linkup(&pci->ep);
+
+ dev_dbg(pci->dev, "Link up\n");
+ } else if (val & PEX_PF0_PME_MES_DR_LDD) {
+ dev_dbg(pci->dev, "Link down\n");
+ dw_pcie_ep_linkdown(&pci->ep);
+ } else if (val & PEX_PF0_PME_MES_DR_HRD) {
+ dev_dbg(pci->dev, "Hot reset\n");
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int ls_pcie_ep_interrupt_init(struct ls_pcie_ep *pcie,
+ struct platform_device *pdev)
+{
+ u32 val;
+ int ret;
+
+ pcie->irq = platform_get_irq_byname(pdev, "pme");
+ if (pcie->irq < 0)
+ return pcie->irq;
+
+ ret = devm_request_irq(&pdev->dev, pcie->irq, ls_pcie_ep_event_handler,
+ IRQF_SHARED, pdev->name, pcie);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't register PCIe IRQ\n");
+ return ret;
+ }
+
+ /* Enable interrupts */
+ val = ls_pcie_pf_lut_readl(pcie, PEX_PF0_PME_MES_IER);
+ val |= PEX_PF0_PME_MES_IER_LDDIE | PEX_PF0_PME_MES_IER_HRDIE |
+ PEX_PF0_PME_MES_IER_LUDIE;
+ ls_pcie_pf_lut_writel(pcie, PEX_PF0_PME_MES_IER, val);
+
+ return 0;
+}
+
static const struct pci_epc_features*
ls_pcie_ep_get_features(struct dw_pcie_ep *ep)
{
@@ -60,16 +166,16 @@ static void ls_pcie_ep_init(struct dw_pcie_ep *ep)
}
static int ls_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
- enum pci_epc_irq_type type, u16 interrupt_num)
+ unsigned int type, u16 interrupt_num)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
switch (type) {
- case PCI_EPC_IRQ_LEGACY:
- return dw_pcie_ep_raise_legacy_irq(ep, func_no);
- case PCI_EPC_IRQ_MSI:
+ case PCI_IRQ_INTX:
+ return dw_pcie_ep_raise_intx_irq(ep, func_no);
+ case PCI_IRQ_MSI:
return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
- case PCI_EPC_IRQ_MSIX:
+ case PCI_IRQ_MSIX:
return dw_pcie_ep_raise_msix_irq_doorbell(ep, func_no,
interrupt_num);
default:
@@ -78,8 +184,7 @@ static int ls_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
}
}
-static unsigned int ls_pcie_ep_func_conf_select(struct dw_pcie_ep *ep,
- u8 func_no)
+static unsigned int ls_pcie_ep_get_dbi_offset(struct dw_pcie_ep *ep, u8 func_no)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
struct ls_pcie_ep *pcie = to_ls_pcie_ep(pci);
@@ -89,10 +194,10 @@ static unsigned int ls_pcie_ep_func_conf_select(struct dw_pcie_ep *ep,
}
static const struct dw_pcie_ep_ops ls_pcie_ep_ops = {
- .ep_init = ls_pcie_ep_init,
+ .init = ls_pcie_ep_init,
.raise_irq = ls_pcie_ep_raise_irq,
.get_features = ls_pcie_ep_get_features,
- .func_conf_select = ls_pcie_ep_func_conf_select,
+ .get_dbi_offset = ls_pcie_ep_get_dbi_offset,
};
static const struct ls_pcie_ep_drvdata ls1_ep_drvdata = {
@@ -110,6 +215,7 @@ static const struct ls_pcie_ep_drvdata lx2_ep_drvdata = {
};
static const struct of_device_id ls_pcie_ep_of_match[] = {
+ { .compatible = "fsl,ls1028a-pcie-ep", .data = &ls1_ep_drvdata },
{ .compatible = "fsl,ls1046a-pcie-ep", .data = &ls1_ep_drvdata },
{ .compatible = "fsl,ls1088a-pcie-ep", .data = &ls2_ep_drvdata },
{ .compatible = "fsl,ls2088a-pcie-ep", .data = &ls2_ep_drvdata },
@@ -124,6 +230,8 @@ static int __init ls_pcie_ep_probe(struct platform_device *pdev)
struct ls_pcie_ep *pcie;
struct pci_epc_features *ls_epc;
struct resource *dbi_base;
+ u8 offset;
+ int ret;
pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
if (!pcie)
@@ -142,7 +250,11 @@ static int __init ls_pcie_ep_probe(struct platform_device *pdev)
pci->dev = dev;
pci->ops = pcie->drvdata->dw_pcie_ops;
- ls_epc->bar_fixed_64bit = (1 << BAR_2) | (1 << BAR_4);
+ ls_epc->bar[BAR_2].only_64bit = true;
+ ls_epc->bar[BAR_3].type = BAR_RESERVED;
+ ls_epc->bar[BAR_4].only_64bit = true;
+ ls_epc->bar[BAR_5].type = BAR_RESERVED;
+ ls_epc->linkup_notifier = true;
pcie->pci = pci;
pcie->ls_epc = ls_epc;
@@ -154,9 +266,29 @@ static int __init ls_pcie_ep_probe(struct platform_device *pdev)
pci->ep.ops = &ls_pcie_ep_ops;
+ pcie->big_endian = of_property_read_bool(dev->of_node, "big-endian");
+
+ dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+
platform_set_drvdata(pdev, pcie);
- return dw_pcie_ep_init(&pci->ep);
+ offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ pcie->lnkcap = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
+
+ ret = dw_pcie_ep_init(&pci->ep);
+ if (ret)
+ return ret;
+
+ ret = dw_pcie_ep_init_registers(&pci->ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DWC endpoint registers\n");
+ dw_pcie_ep_deinit(&pci->ep);
+ return ret;
+ }
+
+ pci_epc_init_notify(pci->ep.epc);
+
+ return ls_pcie_ep_interrupt_init(pcie, pdev);
}
static struct platform_driver ls_pcie_ep_driver = {
diff --git a/drivers/pci/controller/dwc/pci-layerscape.c b/drivers/pci/controller/dwc/pci-layerscape.c
index ed5fb492fe08..a44b5c256d6e 100644
--- a/drivers/pci/controller/dwc/pci-layerscape.c
+++ b/drivers/pci/controller/dwc/pci-layerscape.c
@@ -8,9 +8,11 @@
* Author: Minghuan Lian <Minghuan.Lian@freescale.com>
*/
+#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/init.h>
+#include <linux/iopoll.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
@@ -20,6 +22,7 @@
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
+#include "../../pci.h"
#include "pcie-designware.h"
/* PEX Internal Configuration Registers */
@@ -27,12 +30,46 @@
#define PCIE_ABSERR 0x8d0 /* Bridge Slave Error Response Register */
#define PCIE_ABSERR_SETTING 0x9401 /* Forward error of non-posted request */
+/* PF Message Command Register */
+#define LS_PCIE_PF_MCR 0x2c
+#define PF_MCR_PTOMR BIT(0)
+#define PF_MCR_EXL2S BIT(1)
+
+/* LS1021A PEXn PM Write Control Register */
+#define SCFG_PEXPMWRCR(idx) (0x5c + (idx) * 0x64)
+#define PMXMTTURNOFF BIT(31)
+#define SCFG_PEXSFTRSTCR 0x190
+#define PEXSR(idx) BIT(idx)
+
+/* LS1043A PEX PME control register */
+#define SCFG_PEXPMECR 0x144
+#define PEXPME(idx) BIT(31 - (idx) * 4)
+
+/* LS1043A PEX LUT debug register */
+#define LS_PCIE_LDBG 0x7fc
+#define LDBG_SR BIT(30)
+#define LDBG_WE BIT(31)
+
#define PCIE_IATU_NUM 6
+struct ls_pcie_drvdata {
+ const u32 pf_lut_off;
+ const struct dw_pcie_host_ops *ops;
+ int (*exit_from_l2)(struct dw_pcie_rp *pp);
+ bool scfg_support;
+ bool pm_support;
+};
+
struct ls_pcie {
struct dw_pcie *pci;
+ const struct ls_pcie_drvdata *drvdata;
+ void __iomem *pf_lut_base;
+ struct regmap *scfg;
+ int index;
+ bool big_endian;
};
+#define ls_pcie_pf_lut_readl_addr(addr) ls_pcie_pf_lut_readl(pcie, addr)
#define to_ls_pcie(x) dev_get_drvdata((x)->dev)
static bool ls_pcie_is_bridge(struct ls_pcie *pcie)
@@ -41,7 +78,7 @@ static bool ls_pcie_is_bridge(struct ls_pcie *pcie)
u32 header_type;
header_type = ioread8(pci->dbi_base + PCI_HEADER_TYPE);
- header_type &= 0x7f;
+ header_type &= PCI_HEADER_TYPE_MASK;
return header_type == PCI_HEADER_TYPE_BRIDGE;
}
@@ -73,6 +110,70 @@ static void ls_pcie_fix_error_response(struct ls_pcie *pcie)
iowrite32(PCIE_ABSERR_SETTING, pci->dbi_base + PCIE_ABSERR);
}
+static u32 ls_pcie_pf_lut_readl(struct ls_pcie *pcie, u32 off)
+{
+ if (pcie->big_endian)
+ return ioread32be(pcie->pf_lut_base + off);
+
+ return ioread32(pcie->pf_lut_base + off);
+}
+
+static void ls_pcie_pf_lut_writel(struct ls_pcie *pcie, u32 off, u32 val)
+{
+ if (pcie->big_endian)
+ iowrite32be(val, pcie->pf_lut_base + off);
+ else
+ iowrite32(val, pcie->pf_lut_base + off);
+}
+
+static void ls_pcie_send_turnoff_msg(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct ls_pcie *pcie = to_ls_pcie(pci);
+ u32 val;
+ int ret;
+
+ val = ls_pcie_pf_lut_readl(pcie, LS_PCIE_PF_MCR);
+ val |= PF_MCR_PTOMR;
+ ls_pcie_pf_lut_writel(pcie, LS_PCIE_PF_MCR, val);
+
+ ret = readx_poll_timeout(ls_pcie_pf_lut_readl_addr, LS_PCIE_PF_MCR,
+ val, !(val & PF_MCR_PTOMR),
+ PCIE_PME_TO_L2_TIMEOUT_US/10,
+ PCIE_PME_TO_L2_TIMEOUT_US);
+ if (ret)
+ dev_err(pcie->pci->dev, "PME_Turn_off timeout\n");
+}
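
readx_poll_timeout(op, addr, val, cond, ...) evaluates op(addr) on every iteration, so it only fits single-argument accessors. The ls_pcie_pf_lut_readl_addr() macro defined earlier bridges the driver's two-argument reader by capturing 'pcie' from the calling scope. The same adapter pattern in isolation (struct my_dev and my_readl are illustrative):

    #include <linux/iopoll.h>

    struct my_dev { void __iomem *base; };          /* illustrative */

    static u32 my_readl(struct my_dev *dev, u32 off)
    {
            return readl(dev->base + off);
    }

    #define my_readl_addr(off)      my_readl(dev, off)      /* captures 'dev' */

    static int wait_bit_clear_sketch(struct my_dev *dev, u32 off, u32 bit)
    {
            u32 val;

            /* Poll every 10us, give up after 10ms. */
            return readx_poll_timeout(my_readl_addr, off, val,
                                      !(val & bit), 10, 10000);
    }
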
+
+static int ls_pcie_exit_from_l2(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct ls_pcie *pcie = to_ls_pcie(pci);
+ u32 val;
+ int ret;
+
+ /*
+ * Set PF_MCR_EXL2S bit in LS_PCIE_PF_MCR register for the link
+ * to exit L2 state.
+ */
+ val = ls_pcie_pf_lut_readl(pcie, LS_PCIE_PF_MCR);
+ val |= PF_MCR_EXL2S;
+ ls_pcie_pf_lut_writel(pcie, LS_PCIE_PF_MCR, val);
+
+ /*
+ * The 10ms L2 exit timeout is not defined in the specifications;
+ * it was chosen based on empirical observations.
+ */
+ ret = readx_poll_timeout(ls_pcie_pf_lut_readl_addr, LS_PCIE_PF_MCR,
+ val, !(val & PF_MCR_EXL2S),
+ 1000,
+ 10000);
+ if (ret)
+ dev_err(pcie->pci->dev, "L2 exit timeout\n");
+
+ return ret;
+}
+
static int ls_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
@@ -89,20 +190,135 @@ static int ls_pcie_host_init(struct dw_pcie_rp *pp)
return 0;
}
+static void scfg_pcie_send_turnoff_msg(struct regmap *scfg, u32 reg, u32 mask)
+{
+ /* Send PME_Turn_Off message */
+ regmap_write_bits(scfg, reg, mask, mask);
+
+ /*
+ * There is no specific register to check for PME_To_Ack from the
+ * endpoint, so to be on the safe side, wait for PCIE_PME_TO_L2_TIMEOUT_US.
+ */
+ mdelay(PCIE_PME_TO_L2_TIMEOUT_US/1000);
+
+ /*
+ * Layerscape hardware reference manual recommends clearing the PMXMTTURNOFF bit
+ * to complete the PME_Turn_Off handshake.
+ */
+ regmap_write_bits(scfg, reg, mask, 0);
+}
+
+static void ls1021a_pcie_send_turnoff_msg(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct ls_pcie *pcie = to_ls_pcie(pci);
+
+ scfg_pcie_send_turnoff_msg(pcie->scfg, SCFG_PEXPMWRCR(pcie->index), PMXMTTURNOFF);
+}
+
+static int scfg_pcie_exit_from_l2(struct regmap *scfg, u32 reg, u32 mask)
+{
+ /* Reset the PEX wrapper to bring the link out of L2 */
+ regmap_write_bits(scfg, reg, mask, mask);
+ regmap_write_bits(scfg, reg, mask, 0);
+
+ return 0;
+}
+
+static int ls1021a_pcie_exit_from_l2(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct ls_pcie *pcie = to_ls_pcie(pci);
+
+ return scfg_pcie_exit_from_l2(pcie->scfg, SCFG_PEXSFTRSTCR, PEXSR(pcie->index));
+}
+
+static void ls1043a_pcie_send_turnoff_msg(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct ls_pcie *pcie = to_ls_pcie(pci);
+
+ scfg_pcie_send_turnoff_msg(pcie->scfg, SCFG_PEXPMECR, PEXPME(pcie->index));
+}
+
+static int ls1043a_pcie_exit_from_l2(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct ls_pcie *pcie = to_ls_pcie(pci);
+ u32 val;
+
+ /*
+ * Reset the PEX wrapper to bring the link out of L2.
+ * LDBG_WE: allows the user to have write access to the PEXDBG[SR] for both setting and
+ * clearing the soft reset on the PEX module.
+ * LDBG_SR: When SR is set to 1, the PEX module enters soft reset.
+ */
+ val = ls_pcie_pf_lut_readl(pcie, LS_PCIE_LDBG);
+ val |= LDBG_WE;
+ ls_pcie_pf_lut_writel(pcie, LS_PCIE_LDBG, val);
+
+ val = ls_pcie_pf_lut_readl(pcie, LS_PCIE_LDBG);
+ val |= LDBG_SR;
+ ls_pcie_pf_lut_writel(pcie, LS_PCIE_LDBG, val);
+
+ val = ls_pcie_pf_lut_readl(pcie, LS_PCIE_LDBG);
+ val &= ~LDBG_SR;
+ ls_pcie_pf_lut_writel(pcie, LS_PCIE_LDBG, val);
+
+ val = ls_pcie_pf_lut_readl(pcie, LS_PCIE_LDBG);
+ val &= ~LDBG_WE;
+ ls_pcie_pf_lut_writel(pcie, LS_PCIE_LDBG, val);
+
+ return 0;
+}
+
static const struct dw_pcie_host_ops ls_pcie_host_ops = {
- .host_init = ls_pcie_host_init,
+ .init = ls_pcie_host_init,
+ .pme_turn_off = ls_pcie_send_turnoff_msg,
+};
+
+static const struct dw_pcie_host_ops ls1021a_pcie_host_ops = {
+ .init = ls_pcie_host_init,
+ .pme_turn_off = ls1021a_pcie_send_turnoff_msg,
+};
+
+static const struct ls_pcie_drvdata ls1021a_drvdata = {
+ .pm_support = true,
+ .scfg_support = true,
+ .ops = &ls1021a_pcie_host_ops,
+ .exit_from_l2 = ls1021a_pcie_exit_from_l2,
+};
+
+static const struct dw_pcie_host_ops ls1043a_pcie_host_ops = {
+ .init = ls_pcie_host_init,
+ .pme_turn_off = ls1043a_pcie_send_turnoff_msg,
+};
+
+static const struct ls_pcie_drvdata ls1043a_drvdata = {
+ .pf_lut_off = 0x10000,
+ .pm_support = true,
+ .scfg_support = true,
+ .ops = &ls1043a_pcie_host_ops,
+ .exit_from_l2 = ls1043a_pcie_exit_from_l2,
+};
+
+static const struct ls_pcie_drvdata layerscape_drvdata = {
+ .pf_lut_off = 0xc0000,
+ .pm_support = true,
+ .ops = &ls_pcie_host_ops,
+ .exit_from_l2 = ls_pcie_exit_from_l2,
};
static const struct of_device_id ls_pcie_of_match[] = {
- { .compatible = "fsl,ls1012a-pcie", },
- { .compatible = "fsl,ls1021a-pcie", },
- { .compatible = "fsl,ls1028a-pcie", },
- { .compatible = "fsl,ls1043a-pcie", },
- { .compatible = "fsl,ls1046a-pcie", },
- { .compatible = "fsl,ls2080a-pcie", },
- { .compatible = "fsl,ls2085a-pcie", },
- { .compatible = "fsl,ls2088a-pcie", },
- { .compatible = "fsl,ls1088a-pcie", },
+ { .compatible = "fsl,ls1012a-pcie", .data = &layerscape_drvdata },
+ { .compatible = "fsl,ls1021a-pcie", .data = &ls1021a_drvdata },
+ { .compatible = "fsl,ls1028a-pcie", .data = &layerscape_drvdata },
+ { .compatible = "fsl,ls1043a-pcie", .data = &ls1043a_drvdata },
+ { .compatible = "fsl,ls1046a-pcie", .data = &layerscape_drvdata },
+ { .compatible = "fsl,ls2080a-pcie", .data = &layerscape_drvdata },
+ { .compatible = "fsl,ls2085a-pcie", .data = &layerscape_drvdata },
+ { .compatible = "fsl,ls2088a-pcie", .data = &layerscape_drvdata },
+ { .compatible = "fsl,ls1088a-pcie", .data = &layerscape_drvdata },
{ },
};
@@ -112,6 +328,7 @@ static int ls_pcie_probe(struct platform_device *pdev)
struct dw_pcie *pci;
struct ls_pcie *pcie;
struct resource *dbi_base;
+ u32 index[2];
pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
if (!pcie)
@@ -121,16 +338,34 @@ static int ls_pcie_probe(struct platform_device *pdev)
if (!pci)
return -ENOMEM;
- pci->dev = dev;
- pci->pp.ops = &ls_pcie_host_ops;
+ pcie->drvdata = of_device_get_match_data(dev);
+ pci->dev = dev;
pcie->pci = pci;
+ pci->pp.ops = pcie->drvdata->ops;
dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_base);
if (IS_ERR(pci->dbi_base))
return PTR_ERR(pci->dbi_base);
+ pcie->big_endian = of_property_read_bool(dev->of_node, "big-endian");
+
+ pcie->pf_lut_base = pci->dbi_base + pcie->drvdata->pf_lut_off;
+
+ if (pcie->drvdata->scfg_support) {
+ pcie->scfg =
+ syscon_regmap_lookup_by_phandle_args(dev->of_node,
+ "fsl,pcie-scfg", 1,
+ index);
+ if (IS_ERR(pcie->scfg)) {
+ dev_err(dev, "No syscfg phandle specified\n");
+ return PTR_ERR(pcie->scfg);
+ }
+
+ pcie->index = index[0];
+ }
+
if (!ls_pcie_is_bridge(pcie))
return -ENODEV;
@@ -139,12 +374,42 @@ static int ls_pcie_probe(struct platform_device *pdev)
return dw_pcie_host_init(&pci->pp);
}
+static int ls_pcie_suspend_noirq(struct device *dev)
+{
+ struct ls_pcie *pcie = dev_get_drvdata(dev);
+
+ if (!pcie->drvdata->pm_support)
+ return 0;
+
+ return dw_pcie_suspend_noirq(pcie->pci);
+}
+
+static int ls_pcie_resume_noirq(struct device *dev)
+{
+ struct ls_pcie *pcie = dev_get_drvdata(dev);
+ int ret;
+
+ if (!pcie->drvdata->pm_support)
+ return 0;
+
+ ret = pcie->drvdata->exit_from_l2(&pcie->pci->pp);
+ if (ret)
+ return ret;
+
+ return dw_pcie_resume_noirq(pcie->pci);
+}
+
+static const struct dev_pm_ops ls_pcie_pm_ops = {
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(ls_pcie_suspend_noirq, ls_pcie_resume_noirq)
+};
+
static struct platform_driver ls_pcie_driver = {
.probe = ls_pcie_probe,
.driver = {
.name = "layerscape-pcie",
.of_match_table = ls_pcie_of_match,
.suppress_bind_attrs = true,
+ .pm = &ls_pcie_pm_ops,
},
};
builtin_platform_driver(ls_pcie_driver);
diff --git a/drivers/pci/controller/dwc/pci-meson.c b/drivers/pci/controller/dwc/pci-meson.c
index c1527693bed9..54b6a4196f17 100644
--- a/drivers/pci/controller/dwc/pci-meson.c
+++ b/drivers/pci/controller/dwc/pci-meson.c
@@ -9,14 +9,13 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
-#include <linux/of_device.h>
-#include <linux/of_gpio.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/resource.h>
#include <linux/types.h>
#include <linux/phy/phy.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include "pcie-designware.h"
@@ -109,10 +108,22 @@ static int meson_pcie_get_mems(struct platform_device *pdev,
struct meson_pcie *mp)
{
struct dw_pcie *pci = &mp->pci;
+ struct resource *res;
- pci->dbi_base = devm_platform_ioremap_resource_byname(pdev, "elbi");
- if (IS_ERR(pci->dbi_base))
- return PTR_ERR(pci->dbi_base);
+ /*
+ * For the broken DTs that supply 'dbi' as 'elbi', parse the 'elbi'
+ * region and assign it to both 'pci->elbi_base' and 'pci->dbi_base' so
+ * that the DWC core can skip parsing both regions.
+ */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi");
+ if (res) {
+ pci->elbi_base = devm_pci_remap_cfg_resource(pci->dev, res);
+ if (IS_ERR(pci->elbi_base))
+ return PTR_ERR(pci->elbi_base);
+
+ pci->dbi_base = pci->elbi_base;
+ pci->dbi_phys_addr = res->start;
+ }
mp->cfg_base = devm_platform_ioremap_resource_byname(pdev, "cfg");
if (IS_ERR(mp->cfg_base))
@@ -163,6 +174,13 @@ static int meson_pcie_reset(struct meson_pcie *mp)
return 0;
}
+static inline void meson_pcie_disable_clock(void *data)
+{
+ struct clk *clk = data;
+
+ clk_disable_unprepare(clk);
+}
+
static inline struct clk *meson_pcie_probe_clock(struct device *dev,
const char *id, u64 rate)
{
@@ -187,9 +205,7 @@ static inline struct clk *meson_pcie_probe_clock(struct device *dev,
return ERR_PTR(ret);
}
- devm_add_action_or_reset(dev,
- (void (*) (void *))clk_disable_unprepare,
- clk);
+ devm_add_action_or_reset(dev, meson_pcie_disable_clock, clk);
return clk;
}
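
Replacing the cast of clk_disable_unprepare() with a typed wrapper is more than style: calling a function through a pointer of a mismatched prototype is undefined behavior and trips kernels built with control-flow integrity checking. The general devm pattern, sketched with demo names:

    #include <linux/clk.h>
    #include <linux/device.h>

    static void demo_clk_off(void *data)
    {
            clk_disable_unprepare(data);    /* prototype now matches exactly */
    }

    static int demo_clk_get_on(struct device *dev, struct clk *clk)
    {
            int ret = clk_prepare_enable(clk);

            if (ret)
                    return ret;

            /* Runs demo_clk_off() on detach, or at once if adding fails. */
            return devm_add_action_or_reset(dev, demo_clk_off, clk);
    }
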
@@ -331,7 +347,7 @@ static struct pci_ops meson_pci_ops = {
.write = pci_generic_config_write,
};
-static int meson_pcie_link_up(struct dw_pcie *pci)
+static bool meson_pcie_link_up(struct dw_pcie *pci)
{
struct meson_pcie *mp = to_meson_pcie(pci);
struct device *dev = pci->dev;
@@ -359,7 +375,7 @@ static int meson_pcie_link_up(struct dw_pcie *pci)
dev_dbg(dev, "speed_okay\n");
if (smlh_up && rdlh_up && ltssm_up && speed_okay)
- return 1;
+ return true;
cnt++;
@@ -367,7 +383,7 @@ static int meson_pcie_link_up(struct dw_pcie *pci)
} while (cnt < WAIT_LINKUP_TIMEOUT);
dev_err(dev, "error: wait linkup timeout\n");
- return 0;
+ return false;
}
static int meson_pcie_host_init(struct dw_pcie_rp *pp)
@@ -384,7 +400,7 @@ static int meson_pcie_host_init(struct dw_pcie_rp *pp)
}
static const struct dw_pcie_host_ops meson_pcie_host_ops = {
- .host_init = meson_pcie_host_init,
+ .init = meson_pcie_host_init,
};
static const struct dw_pcie_ops dw_pcie_ops = {
diff --git a/drivers/pci/controller/dwc/pcie-al.c b/drivers/pci/controller/dwc/pcie-al.c
index b8cb77c9c4bd..345c281c74fe 100644
--- a/drivers/pci/controller/dwc/pcie-al.c
+++ b/drivers/pci/controller/dwc/pcie-al.c
@@ -242,18 +242,24 @@ static struct pci_ops al_child_pci_ops = {
.write = pci_generic_config_write,
};
-static void al_pcie_config_prepare(struct al_pcie *pcie)
+static int al_pcie_config_prepare(struct al_pcie *pcie)
{
struct al_pcie_target_bus_cfg *target_bus_cfg;
struct dw_pcie_rp *pp = &pcie->pci->pp;
unsigned int ecam_bus_mask;
+ struct resource_entry *ft;
u32 cfg_control_offset;
+ struct resource *bus;
u8 subordinate_bus;
u8 secondary_bus;
u32 cfg_control;
u32 reg;
- struct resource *bus = resource_list_first_type(&pp->bridge->windows, IORESOURCE_BUS)->res;
+ ft = resource_list_first_type(&pp->bridge->windows, IORESOURCE_BUS);
+ if (!ft)
+ return -ENODEV;
+
+ bus = ft->res;
target_bus_cfg = &pcie->target_bus_cfg;
ecam_bus_mask = (pcie->ecam_size >> PCIE_ECAM_BUS_SHIFT) - 1;
@@ -287,6 +293,8 @@ static void al_pcie_config_prepare(struct al_pcie *pcie)
FIELD_PREP(CFG_CONTROL_SEC_BUS_MASK, secondary_bus);
al_pcie_controller_writel(pcie, cfg_control_offset, reg);
+
+ return 0;
}
static int al_pcie_host_init(struct dw_pcie_rp *pp)
@@ -305,13 +313,15 @@ static int al_pcie_host_init(struct dw_pcie_rp *pp)
if (rc)
return rc;
- al_pcie_config_prepare(pcie);
+ rc = al_pcie_config_prepare(pcie);
+ if (rc)
+ return rc;
return 0;
}
static const struct dw_pcie_host_ops al_pcie_host_ops = {
- .host_init = al_pcie_host_init,
+ .init = al_pcie_host_init,
};
static int al_pcie_probe(struct platform_device *pdev)
@@ -342,6 +352,7 @@ static int al_pcie_probe(struct platform_device *pdev)
return -ENOENT;
}
al_pcie->ecam_size = resource_size(ecam_res);
+ pci->pp.native_ecam = true;
controller_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"controller");
diff --git a/drivers/pci/controller/dwc/pcie-amd-mdb.c b/drivers/pci/controller/dwc/pcie-amd-mdb.c
new file mode 100644
index 000000000000..3c6e837465bb
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-amd-mdb.c
@@ -0,0 +1,526 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for AMD MDB PCIe Bridge
+ *
+ * Copyright (C) 2024-2025, Advanced Micro Devices, Inc.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of_device.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/types.h>
+
+#include "../../pci.h"
+#include "pcie-designware.h"
+
+#define AMD_MDB_TLP_IR_STATUS_MISC 0x4C0
+#define AMD_MDB_TLP_IR_MASK_MISC 0x4C4
+#define AMD_MDB_TLP_IR_ENABLE_MISC 0x4C8
+#define AMD_MDB_TLP_IR_DISABLE_MISC 0x4CC
+
+#define AMD_MDB_TLP_PCIE_INTX_MASK GENMASK(23, 16)
+
+#define AMD_MDB_PCIE_INTR_INTX_ASSERT(x) BIT((x) * 2)
+
+/* Interrupt registers definitions. */
+#define AMD_MDB_PCIE_INTR_CMPL_TIMEOUT 15
+#define AMD_MDB_PCIE_INTR_INTX 16
+#define AMD_MDB_PCIE_INTR_PM_PME_RCVD 24
+#define AMD_MDB_PCIE_INTR_PME_TO_ACK_RCVD 25
+#define AMD_MDB_PCIE_INTR_MISC_CORRECTABLE 26
+#define AMD_MDB_PCIE_INTR_NONFATAL 27
+#define AMD_MDB_PCIE_INTR_FATAL 28
+
+#define IMR(x) BIT(AMD_MDB_PCIE_INTR_ ##x)
+#define AMD_MDB_PCIE_IMR_ALL_MASK \
+ ( \
+ IMR(CMPL_TIMEOUT) | \
+ IMR(PM_PME_RCVD) | \
+ IMR(PME_TO_ACK_RCVD) | \
+ IMR(MISC_CORRECTABLE) | \
+ IMR(NONFATAL) | \
+ IMR(FATAL) | \
+ AMD_MDB_TLP_PCIE_INTX_MASK \
+ )
+
+/**
+ * struct amd_mdb_pcie - PCIe port information
+ * @pci: DesignWare PCIe controller structure
+ * @slcr: MDB System Level Control and Status Register (SLCR) base
+ * @intx_domain: INTx IRQ domain pointer
+ * @mdb_domain: MDB IRQ domain pointer
+ * @perst_gpio: GPIO descriptor for PERST# signal handling
+ * @intx_irq: INTx IRQ interrupt number
+ */
+struct amd_mdb_pcie {
+ struct dw_pcie pci;
+ void __iomem *slcr;
+ struct irq_domain *intx_domain;
+ struct irq_domain *mdb_domain;
+ struct gpio_desc *perst_gpio;
+ int intx_irq;
+};
+
+static const struct dw_pcie_host_ops amd_mdb_pcie_host_ops = {
+};
+
+static void amd_mdb_intx_irq_mask(struct irq_data *data)
+{
+ struct amd_mdb_pcie *pcie = irq_data_get_irq_chip_data(data);
+ struct dw_pcie *pci = &pcie->pci;
+ struct dw_pcie_rp *port = &pci->pp;
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&port->lock, flags);
+ val = FIELD_PREP(AMD_MDB_TLP_PCIE_INTX_MASK,
+ AMD_MDB_PCIE_INTR_INTX_ASSERT(data->hwirq));
+
+ /*
+ * Writing '1' to a bit in AMD_MDB_TLP_IR_DISABLE_MISC disables that
+ * interrupt, writing '0' has no effect.
+ */
+ writel_relaxed(val, pcie->slcr + AMD_MDB_TLP_IR_DISABLE_MISC);
+ raw_spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void amd_mdb_intx_irq_unmask(struct irq_data *data)
+{
+ struct amd_mdb_pcie *pcie = irq_data_get_irq_chip_data(data);
+ struct dw_pcie *pci = &pcie->pci;
+ struct dw_pcie_rp *port = &pci->pp;
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&port->lock, flags);
+ val = FIELD_PREP(AMD_MDB_TLP_PCIE_INTX_MASK,
+ AMD_MDB_PCIE_INTR_INTX_ASSERT(data->hwirq));
+
+ /*
+ * Writing '1' to a bit in AMD_MDB_TLP_IR_ENABLE_MISC enables that
+ * interrupt, writing '0' has no effect.
+ */
+ writel_relaxed(val, pcie->slcr + AMD_MDB_TLP_IR_ENABLE_MISC);
+ raw_spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static struct irq_chip amd_mdb_intx_irq_chip = {
+ .name = "AMD MDB INTx",
+ .irq_mask = amd_mdb_intx_irq_mask,
+ .irq_unmask = amd_mdb_intx_irq_unmask,
+};
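
The mask/unmask helpers place each INTx line's assert bit inside the [23:16] field: AMD_MDB_PCIE_INTR_INTX_ASSERT(x) selects every other bit within the field, and FIELD_PREP() shifts it to the field's position. A sketch of the arithmetic:

    #include <linux/bitfield.h>
    #include <linux/irqdomain.h>

    static u32 intx_assert_bit_sketch(irq_hw_number_t hwirq)
    {
            /* e.g. INTA (hwirq 0) -> 0x00010000, INTC (hwirq 2) -> 0x00100000 */
            return FIELD_PREP(AMD_MDB_TLP_PCIE_INTX_MASK,
                              AMD_MDB_PCIE_INTR_INTX_ASSERT(hwirq));
    }
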
+
+/**
+ * amd_mdb_pcie_intx_map - Set the handler for the INTx and mark IRQ as valid
+ * @domain: IRQ domain
+ * @irq: Virtual IRQ number
+ * @hwirq: Hardware interrupt number
+ *
+ * Return: Always returns '0'.
+ */
+static int amd_mdb_pcie_intx_map(struct irq_domain *domain,
+ unsigned int irq, irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &amd_mdb_intx_irq_chip,
+ handle_level_irq);
+ irq_set_chip_data(irq, domain->host_data);
+ irq_set_status_flags(irq, IRQ_LEVEL);
+
+ return 0;
+}
+
+/* INTx IRQ domain operations. */
+static const struct irq_domain_ops amd_intx_domain_ops = {
+ .map = amd_mdb_pcie_intx_map,
+};
+
+static irqreturn_t dw_pcie_rp_intx(int irq, void *args)
+{
+ struct amd_mdb_pcie *pcie = args;
+ unsigned long val;
+ int i, int_status;
+
+ val = readl_relaxed(pcie->slcr + AMD_MDB_TLP_IR_STATUS_MISC);
+ int_status = FIELD_GET(AMD_MDB_TLP_PCIE_INTX_MASK, val);
+
+ for (i = 0; i < PCI_NUM_INTX; i++) {
+ if (int_status & AMD_MDB_PCIE_INTR_INTX_ASSERT(i))
+ generic_handle_domain_irq(pcie->intx_domain, i);
+ }
+
+ return IRQ_HANDLED;
+}
+
+#define _IC(x, s)[AMD_MDB_PCIE_INTR_ ## x] = { __stringify(x), s }
+
+static const struct {
+ const char *sym;
+ const char *str;
+} intr_cause[32] = {
+ _IC(CMPL_TIMEOUT, "Completion timeout"),
+ _IC(PM_PME_RCVD, "PM_PME message received"),
+ _IC(PME_TO_ACK_RCVD, "PME_TO_ACK message received"),
+ _IC(MISC_CORRECTABLE, "Correctable error message"),
+ _IC(NONFATAL, "Non fatal error message"),
+ _IC(FATAL, "Fatal error message"),
+};
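
The _IC() helper pairs a designated array initializer with __stringify(), so each entry is keyed by its interrupt bit number and carries both the symbol name and a human-readable description; unnamed slots stay zeroed and are skipped by the NULL checks later in the file. The idiom in miniature, with demo names:

    #include <linux/stringify.h>

    enum { DEMO_INTR_FATAL = 3, DEMO_INTR_NONFATAL = 4 };

    #define DEMO_IC(x, s) [DEMO_INTR_ ## x] = { __stringify(x), s }

    static const struct {
            const char *sym;
            const char *str;
    } demo_cause[8] = {
            DEMO_IC(FATAL, "Fatal error message"),  /* [3] = { "FATAL", ... } */
            DEMO_IC(NONFATAL, "Non-fatal error message"),
    };
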
+
+static void amd_mdb_event_irq_mask(struct irq_data *d)
+{
+ struct amd_mdb_pcie *pcie = irq_data_get_irq_chip_data(d);
+ struct dw_pcie *pci = &pcie->pci;
+ struct dw_pcie_rp *port = &pci->pp;
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&port->lock, flags);
+ val = BIT(d->hwirq);
+ writel_relaxed(val, pcie->slcr + AMD_MDB_TLP_IR_DISABLE_MISC);
+ raw_spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void amd_mdb_event_irq_unmask(struct irq_data *d)
+{
+ struct amd_mdb_pcie *pcie = irq_data_get_irq_chip_data(d);
+ struct dw_pcie *pci = &pcie->pci;
+ struct dw_pcie_rp *port = &pci->pp;
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&port->lock, flags);
+ val = BIT(d->hwirq);
+ writel_relaxed(val, pcie->slcr + AMD_MDB_TLP_IR_ENABLE_MISC);
+ raw_spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static struct irq_chip amd_mdb_event_irq_chip = {
+ .name = "AMD MDB RC-Event",
+ .irq_mask = amd_mdb_event_irq_mask,
+ .irq_unmask = amd_mdb_event_irq_unmask,
+};
+
+static int amd_mdb_pcie_event_map(struct irq_domain *domain,
+ unsigned int irq, irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &amd_mdb_event_irq_chip,
+ handle_level_irq);
+ irq_set_chip_data(irq, domain->host_data);
+ irq_set_status_flags(irq, IRQ_LEVEL);
+
+ return 0;
+}
+
+static const struct irq_domain_ops event_domain_ops = {
+ .map = amd_mdb_pcie_event_map,
+};
+
+static irqreturn_t amd_mdb_pcie_event(int irq, void *args)
+{
+ struct amd_mdb_pcie *pcie = args;
+ unsigned long val;
+ int i;
+
+ val = readl_relaxed(pcie->slcr + AMD_MDB_TLP_IR_STATUS_MISC);
+ val &= ~readl_relaxed(pcie->slcr + AMD_MDB_TLP_IR_MASK_MISC);
+ for_each_set_bit(i, &val, 32)
+ generic_handle_domain_irq(pcie->mdb_domain, i);
+ writel_relaxed(val, pcie->slcr + AMD_MDB_TLP_IR_STATUS_MISC);
+
+ return IRQ_HANDLED;
+}
+
+static void amd_mdb_pcie_free_irq_domains(struct amd_mdb_pcie *pcie)
+{
+ if (pcie->intx_domain) {
+ irq_domain_remove(pcie->intx_domain);
+ pcie->intx_domain = NULL;
+ }
+
+ if (pcie->mdb_domain) {
+ irq_domain_remove(pcie->mdb_domain);
+ pcie->mdb_domain = NULL;
+ }
+}
+
+static int amd_mdb_pcie_init_port(struct amd_mdb_pcie *pcie)
+{
+ unsigned long val;
+
+ /* Disable all TLP interrupts. */
+ writel_relaxed(AMD_MDB_PCIE_IMR_ALL_MASK,
+ pcie->slcr + AMD_MDB_TLP_IR_DISABLE_MISC);
+
+ /* Clear pending TLP interrupts. */
+ val = readl_relaxed(pcie->slcr + AMD_MDB_TLP_IR_STATUS_MISC);
+ val &= AMD_MDB_PCIE_IMR_ALL_MASK;
+ writel_relaxed(val, pcie->slcr + AMD_MDB_TLP_IR_STATUS_MISC);
+
+ /* Enable all TLP interrupts. */
+ writel_relaxed(AMD_MDB_PCIE_IMR_ALL_MASK,
+ pcie->slcr + AMD_MDB_TLP_IR_ENABLE_MISC);
+
+ return 0;
+}
+
+/**
+ * amd_mdb_pcie_init_irq_domains - Initialize IRQ domains
+ * @pcie: PCIe port information
+ * @pdev: Platform device
+ *
+ * Return: Returns '0' on success and error value on failure.
+ */
+static int amd_mdb_pcie_init_irq_domains(struct amd_mdb_pcie *pcie,
+ struct platform_device *pdev)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ struct dw_pcie_rp *pp = &pci->pp;
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct device_node *pcie_intc_node;
+ int err;
+
+ pcie_intc_node = of_get_child_by_name(node, "interrupt-controller");
+ if (!pcie_intc_node) {
+ dev_err(dev, "No PCIe Intc node found\n");
+ return -ENODEV;
+ }
+
+ pcie->mdb_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), 32,
+ &event_domain_ops, pcie);
+ if (!pcie->mdb_domain) {
+ err = -ENOMEM;
+ dev_err(dev, "Failed to add MDB domain\n");
+ goto out;
+ }
+
+ irq_domain_update_bus_token(pcie->mdb_domain, DOMAIN_BUS_NEXUS);
+
+ pcie->intx_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node),
+ PCI_NUM_INTX, &amd_intx_domain_ops, pcie);
+ if (!pcie->intx_domain) {
+ err = -ENOMEM;
+ dev_err(dev, "Failed to add INTx domain\n");
+ goto mdb_out;
+ }
+
+ of_node_put(pcie_intc_node);
+ irq_domain_update_bus_token(pcie->intx_domain, DOMAIN_BUS_WIRED);
+
+ raw_spin_lock_init(&pp->lock);
+
+ return 0;
+mdb_out:
+ amd_mdb_pcie_free_irq_domains(pcie);
+out:
+ of_node_put(pcie_intc_node);
+ return err;
+}
+
+static irqreturn_t amd_mdb_pcie_intr_handler(int irq, void *args)
+{
+ struct amd_mdb_pcie *pcie = args;
+ struct device *dev;
+ struct irq_data *d;
+
+ dev = pcie->pci.dev;
+
+ /*
+ * In the future, error reporting will be hooked to the AER subsystem.
+ * Currently, the driver prints a warning message to the user.
+ */
+ d = irq_domain_get_irq_data(pcie->mdb_domain, irq);
+ if (intr_cause[d->hwirq].str)
+ dev_warn(dev, "%s\n", intr_cause[d->hwirq].str);
+ else
+ dev_warn_once(dev, "Unknown IRQ %ld\n", d->hwirq);
+
+ return IRQ_HANDLED;
+}
+
+static int amd_mdb_setup_irq(struct amd_mdb_pcie *pcie,
+ struct platform_device *pdev)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ struct dw_pcie_rp *pp = &pci->pp;
+ struct device *dev = &pdev->dev;
+ int i, irq, err;
+
+ amd_mdb_pcie_init_port(pcie);
+
+ pp->irq = platform_get_irq(pdev, 0);
+ if (pp->irq < 0)
+ return pp->irq;
+
+ for (i = 0; i < ARRAY_SIZE(intr_cause); i++) {
+ if (!intr_cause[i].str)
+ continue;
+
+ irq = irq_create_mapping(pcie->mdb_domain, i);
+ if (!irq) {
+ dev_err(dev, "Failed to map MDB domain interrupt\n");
+ return -ENOMEM;
+ }
+
+ err = devm_request_irq(dev, irq, amd_mdb_pcie_intr_handler,
+ IRQF_NO_THREAD, intr_cause[i].sym, pcie);
+ if (err) {
+ dev_err(dev, "Failed to request IRQ %d, err=%d\n",
+ irq, err);
+ return err;
+ }
+ }
+
+ pcie->intx_irq = irq_create_mapping(pcie->mdb_domain,
+ AMD_MDB_PCIE_INTR_INTX);
+ if (!pcie->intx_irq) {
+ dev_err(dev, "Failed to map INTx interrupt\n");
+ return -ENXIO;
+ }
+
+ err = devm_request_irq(dev, pcie->intx_irq, dw_pcie_rp_intx,
+ IRQF_NO_THREAD, NULL, pcie);
+ if (err) {
+ dev_err(dev, "Failed to request INTx IRQ %d, err=%d\n",
+ pcie->intx_irq, err);
+ return err;
+ }
+
+ /* Plug the main event handler. */
+ err = devm_request_irq(dev, pp->irq, amd_mdb_pcie_event, IRQF_NO_THREAD,
+ "amd_mdb pcie_irq", pcie);
+ if (err) {
+ dev_err(dev, "Failed to request event IRQ %d, err=%d\n",
+ pp->irq, err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int amd_mdb_parse_pcie_port(struct amd_mdb_pcie *pcie)
+{
+ struct device *dev = pcie->pci.dev;
+ struct device_node *pcie_port_node __maybe_unused;
+
+ /*
+ * This platform currently supports only one Root Port, so the loop
+ * will execute only once.
+ * TODO: Enhance the driver to handle multiple Root Ports in the future.
+ */
+ for_each_child_of_node_with_prefix(dev->of_node, pcie_port_node, "pcie") {
+ pcie->perst_gpio = devm_fwnode_gpiod_get(dev, of_fwnode_handle(pcie_port_node),
+ "reset", GPIOD_OUT_HIGH, NULL);
+ if (IS_ERR(pcie->perst_gpio))
+ return dev_err_probe(dev, PTR_ERR(pcie->perst_gpio),
+ "Failed to request reset GPIO\n");
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
+static int amd_mdb_add_pcie_port(struct amd_mdb_pcie *pcie,
+ struct platform_device *pdev)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ struct dw_pcie_rp *pp = &pci->pp;
+ struct device *dev = &pdev->dev;
+ int err;
+
+ pcie->slcr = devm_platform_ioremap_resource_byname(pdev, "slcr");
+ if (IS_ERR(pcie->slcr))
+ return PTR_ERR(pcie->slcr);
+
+ err = amd_mdb_pcie_init_irq_domains(pcie, pdev);
+ if (err)
+ return err;
+
+ err = amd_mdb_setup_irq(pcie, pdev);
+ if (err) {
+ dev_err(dev, "Failed to set up interrupts, err=%d\n", err);
+ goto out;
+ }
+
+ pp->ops = &amd_mdb_pcie_host_ops;
+
+ if (pcie->perst_gpio) {
+ mdelay(PCIE_T_PVPERL_MS);
+ gpiod_set_value_cansleep(pcie->perst_gpio, 0);
+ mdelay(PCIE_RESET_CONFIG_WAIT_MS);
+ }
+
+ err = dw_pcie_host_init(pp);
+ if (err) {
+ dev_err(dev, "Failed to initialize host, err=%d\n", err);
+ goto out;
+ }
+
+ return 0;
+
+out:
+ amd_mdb_pcie_free_irq_domains(pcie);
+ return err;
+}
+
+static int amd_mdb_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct amd_mdb_pcie *pcie;
+ struct dw_pcie *pci;
+ int ret;
+
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ pci = &pcie->pci;
+ pci->dev = dev;
+
+ platform_set_drvdata(pdev, pcie);
+
+ ret = amd_mdb_parse_pcie_port(pcie);
+ /*
+ * If amd_mdb_parse_pcie_port returns -ENODEV, it indicates that the
+ * PCIe Bridge node was not found in the device tree. This is not
+ * considered a fatal error and will trigger a fallback where the
+ * reset GPIO is acquired directly from the PCIe Host Bridge node.
+ */
+ if (ret) {
+ if (ret != -ENODEV)
+ return ret;
+
+ pcie->perst_gpio = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(pcie->perst_gpio))
+ return dev_err_probe(dev, PTR_ERR(pcie->perst_gpio),
+ "Failed to request reset GPIO\n");
+ }
+
+ return amd_mdb_add_pcie_port(pcie, pdev);
+}
+
+static const struct of_device_id amd_mdb_pcie_of_match[] = {
+ {
+ .compatible = "amd,versal2-mdb-host",
+ },
+ {},
+};
+
+static struct platform_driver amd_mdb_pcie_driver = {
+ .driver = {
+ .name = "amd-mdb-pcie",
+ .of_match_table = amd_mdb_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = amd_mdb_pcie_probe,
+};
+
+builtin_platform_driver(amd_mdb_pcie_driver);
diff --git a/drivers/pci/controller/dwc/pcie-armada8k.c b/drivers/pci/controller/dwc/pcie-armada8k.c
index 5c999e15c357..c2650fd0d458 100644
--- a/drivers/pci/controller/dwc/pcie-armada8k.c
+++ b/drivers/pci/controller/dwc/pcie-armada8k.c
@@ -139,7 +139,7 @@ static int armada8k_pcie_setup_phys(struct armada8k_pcie *pcie)
return ret;
}
-static int armada8k_pcie_link_up(struct dw_pcie *pci)
+static bool armada8k_pcie_link_up(struct dw_pcie *pci)
{
u32 reg;
u32 mask = PCIE_GLB_STS_RDLH_LINK_UP | PCIE_GLB_STS_PHY_LINK_UP;
@@ -147,10 +147,10 @@ static int armada8k_pcie_link_up(struct dw_pcie *pci)
reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_STATUS_REG);
if ((reg & mask) == mask)
- return 1;
+ return true;
dev_dbg(pci->dev, "No link detected (Global-Status: 0x%08x).\n", reg);
- return 0;
+ return false;
}
static int armada8k_pcie_start_link(struct dw_pcie *pci)
@@ -225,7 +225,7 @@ static irqreturn_t armada8k_pcie_irq_handler(int irq, void *arg)
}
static const struct dw_pcie_host_ops armada8k_pcie_host_ops = {
- .host_init = armada8k_pcie_host_init,
+ .init = armada8k_pcie_host_init,
};
static int armada8k_add_pcie_port(struct armada8k_pcie *pcie,
diff --git a/drivers/pci/controller/dwc/pcie-artpec6.c b/drivers/pci/controller/dwc/pcie-artpec6.c
index 98102079e26d..f4a136ee2daf 100644
--- a/drivers/pci/controller/dwc/pcie-artpec6.c
+++ b/drivers/pci/controller/dwc/pcie-artpec6.c
@@ -10,7 +10,7 @@
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/resource.h>
@@ -94,7 +94,7 @@ static void artpec6_pcie_writel(struct artpec6_pcie *artpec6_pcie, u32 offset, u
regmap_write(artpec6_pcie->regmap, offset, val);
}
-static u64 artpec6_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 pci_addr)
+static u64 artpec6_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 cpu_addr)
{
struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci);
struct dw_pcie_rp *pp = &pci->pp;
@@ -102,13 +102,13 @@ static u64 artpec6_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 pci_addr)
switch (artpec6_pcie->mode) {
case DW_PCIE_RC_TYPE:
- return pci_addr - pp->cfg0_base;
+ return cpu_addr - pp->cfg0_base;
case DW_PCIE_EP_TYPE:
- return pci_addr - ep->phys_base;
+ return cpu_addr - ep->phys_base;
default:
dev_err(pci->dev, "UNKNOWN device type\n");
}
- return pci_addr;
+ return cpu_addr;
}
static int artpec6_pcie_establish_link(struct dw_pcie *pci)
@@ -333,7 +333,7 @@ static int artpec6_pcie_host_init(struct dw_pcie_rp *pp)
}
static const struct dw_pcie_host_ops artpec6_pcie_host_ops = {
- .host_init = artpec6_pcie_host_init,
+ .init = artpec6_pcie_host_init,
};
static void artpec6_pcie_ep_init(struct dw_pcie_ep *ep)
@@ -352,15 +352,15 @@ static void artpec6_pcie_ep_init(struct dw_pcie_ep *ep)
}
static int artpec6_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
- enum pci_epc_irq_type type, u16 interrupt_num)
+ unsigned int type, u16 interrupt_num)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
switch (type) {
- case PCI_EPC_IRQ_LEGACY:
- dev_err(pci->dev, "EP cannot trigger legacy IRQs\n");
+ case PCI_IRQ_INTX:
+ dev_err(pci->dev, "EP cannot trigger INTx IRQs\n");
return -EINVAL;
- case PCI_EPC_IRQ_MSI:
+ case PCI_IRQ_MSI:
return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
default:
dev_err(pci->dev, "UNKNOWN IRQ type\n");
@@ -369,9 +369,20 @@ static int artpec6_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
return 0;
}
+static const struct pci_epc_features artpec6_pcie_epc_features = {
+ .msi_capable = true,
+};
+
+static const struct pci_epc_features *
+artpec6_pcie_get_features(struct dw_pcie_ep *ep)
+{
+ return &artpec6_pcie_epc_features;
+}
+
static const struct dw_pcie_ep_ops pcie_ep_ops = {
- .ep_init = artpec6_pcie_ep_init,
+ .init = artpec6_pcie_ep_init,
.raise_irq = artpec6_pcie_raise_irq,
+ .get_features = artpec6_pcie_get_features,
};
static int artpec6_pcie_probe(struct platform_device *pdev)
@@ -441,7 +452,20 @@ static int artpec6_pcie_probe(struct platform_device *pdev)
pci->ep.ops = &pcie_ep_ops;
- return dw_pcie_ep_init(&pci->ep);
+ ret = dw_pcie_ep_init(&pci->ep);
+ if (ret)
+ return ret;
+
+ ret = dw_pcie_ep_init_registers(&pci->ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DWC endpoint registers\n");
+ dw_pcie_ep_deinit(&pci->ep);
+ return ret;
+ }
+
+ pci_epc_init_notify(pci->ep.epc);
+
+ break;
default:
dev_err(dev, "INVALID device type %d\n", artpec6_pcie->mode);
}
diff --git a/drivers/pci/controller/dwc/pcie-bt1.c b/drivers/pci/controller/dwc/pcie-bt1.c
index 3346770e6654..1340edc18d12 100644
--- a/drivers/pci/controller/dwc/pcie-bt1.c
+++ b/drivers/pci/controller/dwc/pcie-bt1.c
@@ -559,8 +559,8 @@ static void bt1_pcie_host_deinit(struct dw_pcie_rp *pp)
}
static const struct dw_pcie_host_ops bt1_pcie_host_ops = {
- .host_init = bt1_pcie_host_init,
- .host_deinit = bt1_pcie_host_deinit,
+ .init = bt1_pcie_host_init,
+ .deinit = bt1_pcie_host_deinit,
};
static struct bt1_pcie *bt1_pcie_create_data(struct platform_device *pdev)
@@ -583,6 +583,10 @@ static int bt1_pcie_add_port(struct bt1_pcie *btpci)
struct device *dev = &btpci->pdev->dev;
int ret;
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+ if (ret)
+ return ret;
+
btpci->dw.version = DW_PCIE_VER_460A;
btpci->dw.dev = dev;
btpci->dw.ops = &bt1_pcie_ops;
@@ -613,13 +617,11 @@ static int bt1_pcie_probe(struct platform_device *pdev)
return bt1_pcie_add_port(btpci);
}
-static int bt1_pcie_remove(struct platform_device *pdev)
+static void bt1_pcie_remove(struct platform_device *pdev)
{
struct bt1_pcie *btpci = platform_get_drvdata(pdev);
bt1_pcie_del_port(btpci);
-
- return 0;
}
static const struct of_device_id bt1_pcie_of_match[] = {
diff --git a/drivers/pci/controller/dwc/pcie-designware-debugfs.c b/drivers/pci/controller/dwc/pcie-designware-debugfs.c
new file mode 100644
index 000000000000..0fbf86c0b97e
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-designware-debugfs.c
@@ -0,0 +1,927 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Synopsys DesignWare PCIe controller debugfs driver
+ *
+ * Copyright (C) 2025 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Author: Shradha Todi <shradha.t@samsung.com>
+ */
+
+#include <linux/debugfs.h>
+
+#include "pcie-designware.h"
+
+#define SD_STATUS_L1LANE_REG 0xb0
+#define PIPE_RXVALID BIT(18)
+#define PIPE_DETECT_LANE BIT(17)
+#define LANE_SELECT GENMASK(3, 0)
+
+#define ERR_INJ0_OFF 0x34
+#define EINJ_VAL_DIFF GENMASK(28, 16)
+#define EINJ_VC_NUM GENMASK(14, 12)
+#define EINJ_TYPE_SHIFT 8
+#define EINJ0_TYPE GENMASK(11, 8)
+#define EINJ1_TYPE BIT(8)
+#define EINJ2_TYPE GENMASK(9, 8)
+#define EINJ3_TYPE GENMASK(10, 8)
+#define EINJ4_TYPE GENMASK(10, 8)
+#define EINJ5_TYPE BIT(8)
+#define EINJ_COUNT GENMASK(7, 0)
+
+#define ERR_INJ_ENABLE_REG 0x30
+
+#define RAS_DES_EVENT_COUNTER_DATA_REG 0xc
+
+#define RAS_DES_EVENT_COUNTER_CTRL_REG 0x8
+#define EVENT_COUNTER_GROUP_SELECT GENMASK(27, 24)
+#define EVENT_COUNTER_EVENT_SELECT GENMASK(23, 16)
+#define EVENT_COUNTER_LANE_SELECT GENMASK(11, 8)
+#define EVENT_COUNTER_STATUS BIT(7)
+#define EVENT_COUNTER_ENABLE GENMASK(4, 2)
+#define PER_EVENT_ON 0x3
+#define PER_EVENT_OFF 0x1
+
+#define DWC_DEBUGFS_BUF_MAX 128
+
+/**
+ * struct dwc_pcie_rasdes_info - Stores controller common information
+ * @ras_cap_offset: RAS DES vendor specific extended capability offset
+ * @reg_event_lock: Mutex used for RAS DES shadow event registers
+ *
+ * Any parameter that is constant across all files of the debugfs hierarchy
+ * for a single controller is stored in this struct. It is allocated and
+ * assigned to the controller-specific struct dw_pcie during initialization.
+ */
+struct dwc_pcie_rasdes_info {
+ u32 ras_cap_offset;
+ struct mutex reg_event_lock;
+};
+
+/**
+ * struct dwc_pcie_rasdes_priv - Stores file specific private data information
+ * @pci: Reference to the dw_pcie structure
+ * @idx: Index of the file-specific information in the array of structs
+ *
+ * All debugfs files have this struct as their private data.
+ */
+struct dwc_pcie_rasdes_priv {
+ struct dw_pcie *pci;
+ int idx;
+};
+
+/**
+ * struct dwc_pcie_err_inj - Store details about each error injection
+ * supported by DWC RAS DES
+ * @name: Name of the error that can be injected
+ * @err_inj_group: Group number to which the error belongs. The value
+ * can range from 0 to 5
+ * @err_inj_type: Each group can have multiple types of error
+ */
+struct dwc_pcie_err_inj {
+ const char *name;
+ u32 err_inj_group;
+ u32 err_inj_type;
+};
+
+static const struct dwc_pcie_err_inj err_inj_list[] = {
+ {"tx_lcrc", 0x0, 0x0},
+ {"b16_crc_dllp", 0x0, 0x1},
+ {"b16_crc_upd_fc", 0x0, 0x2},
+ {"tx_ecrc", 0x0, 0x3},
+ {"fcrc_tlp", 0x0, 0x4},
+ {"parity_tsos", 0x0, 0x5},
+ {"parity_skpos", 0x0, 0x6},
+ {"rx_lcrc", 0x0, 0x8},
+ {"rx_ecrc", 0x0, 0xb},
+ {"tlp_err_seq", 0x1, 0x0},
+ {"ack_nak_dllp_seq", 0x1, 0x1},
+ {"ack_nak_dllp", 0x2, 0x0},
+ {"upd_fc_dllp", 0x2, 0x1},
+ {"nak_dllp", 0x2, 0x2},
+ {"inv_sync_hdr_sym", 0x3, 0x0},
+ {"com_pad_ts1", 0x3, 0x1},
+ {"com_pad_ts2", 0x3, 0x2},
+ {"com_fts", 0x3, 0x3},
+ {"com_idl", 0x3, 0x4},
+ {"end_edb", 0x3, 0x5},
+ {"stp_sdp", 0x3, 0x6},
+ {"com_skp", 0x3, 0x7},
+ {"posted_tlp_hdr", 0x4, 0x0},
+ {"non_post_tlp_hdr", 0x4, 0x1},
+ {"cmpl_tlp_hdr", 0x4, 0x2},
+ {"posted_tlp_data", 0x4, 0x4},
+ {"non_post_tlp_data", 0x4, 0x5},
+ {"cmpl_tlp_data", 0x4, 0x6},
+ {"duplicate_tlp", 0x5, 0x0},
+ {"nullified_tlp", 0x5, 0x1},
+};
+
+static const u32 err_inj_type_mask[] = {
+ EINJ0_TYPE,
+ EINJ1_TYPE,
+ EINJ2_TYPE,
+ EINJ3_TYPE,
+ EINJ4_TYPE,
+ EINJ5_TYPE,
+};
+
+/**
+ * struct dwc_pcie_event_counter - Store details about each event counter
+ * supported in DWC RAS DES
+ * @name: Name of the error counter
+ * @group_no: Group number that the event belongs to. The value can range
+ * from 0 to 4
+ * @event_no: Event number of the particular event. The value ranges are:
+ * Group 0: 0 - 10
+ * Group 1: 5 - 13
+ * Group 2: 0 - 7
+ * Group 3: 0 - 5
+ * Group 4: 0 - 1
+ */
+struct dwc_pcie_event_counter {
+ const char *name;
+ u32 group_no;
+ u32 event_no;
+};
+
+static const struct dwc_pcie_event_counter event_list[] = {
+ {"ebuf_overflow", 0x0, 0x0},
+ {"ebuf_underrun", 0x0, 0x1},
+ {"decode_err", 0x0, 0x2},
+ {"running_disparity_err", 0x0, 0x3},
+ {"skp_os_parity_err", 0x0, 0x4},
+ {"sync_header_err", 0x0, 0x5},
+ {"rx_valid_deassertion", 0x0, 0x6},
+ {"ctl_skp_os_parity_err", 0x0, 0x7},
+ {"retimer_parity_err_1st", 0x0, 0x8},
+ {"retimer_parity_err_2nd", 0x0, 0x9},
+ {"margin_crc_parity_err", 0x0, 0xA},
+ {"detect_ei_infer", 0x1, 0x5},
+ {"receiver_err", 0x1, 0x6},
+ {"rx_recovery_req", 0x1, 0x7},
+ {"n_fts_timeout", 0x1, 0x8},
+ {"framing_err", 0x1, 0x9},
+ {"deskew_err", 0x1, 0xa},
+ {"framing_err_in_l0", 0x1, 0xc},
+ {"deskew_uncompleted_err", 0x1, 0xd},
+ {"bad_tlp", 0x2, 0x0},
+ {"lcrc_err", 0x2, 0x1},
+ {"bad_dllp", 0x2, 0x2},
+ {"replay_num_rollover", 0x2, 0x3},
+ {"replay_timeout", 0x2, 0x4},
+ {"rx_nak_dllp", 0x2, 0x5},
+ {"tx_nak_dllp", 0x2, 0x6},
+ {"retry_tlp", 0x2, 0x7},
+ {"fc_timeout", 0x3, 0x0},
+ {"poisoned_tlp", 0x3, 0x1},
+ {"ecrc_error", 0x3, 0x2},
+ {"unsupported_request", 0x3, 0x3},
+ {"completer_abort", 0x3, 0x4},
+ {"completion_timeout", 0x3, 0x5},
+ {"ebuf_skp_add", 0x4, 0x0},
+ {"ebuf_skp_del", 0x4, 0x1},
+};
+
+static ssize_t lane_detect_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dw_pcie *pci = file->private_data;
+ struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+ char debugfs_buf[DWC_DEBUGFS_BUF_MAX];
+ ssize_t pos;
+ u32 val;
+
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + SD_STATUS_L1LANE_REG);
+ val = FIELD_GET(PIPE_DETECT_LANE, val);
+ if (val)
+ pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Lane Detected\n");
+ else
+ pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Lane Undetected\n");
+
+ return simple_read_from_buffer(buf, count, ppos, debugfs_buf, pos);
+}
+
+static ssize_t lane_detect_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dw_pcie *pci = file->private_data;
+ struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+ u32 lane, val;
+
+ val = kstrtou32_from_user(buf, count, 0, &lane);
+ if (val)
+ return val;
+
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + SD_STATUS_L1LANE_REG);
+ val &= ~(LANE_SELECT);
+ val |= FIELD_PREP(LANE_SELECT, lane);
+ dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + SD_STATUS_L1LANE_REG, val);
+
+ return count;
+}
+
+static ssize_t rx_valid_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dw_pcie *pci = file->private_data;
+ struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+ char debugfs_buf[DWC_DEBUGFS_BUF_MAX];
+ ssize_t pos;
+ u32 val;
+
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + SD_STATUS_L1LANE_REG);
+ val = FIELD_GET(PIPE_RXVALID, val);
+ if (val)
+ pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "RX Valid\n");
+ else
+ pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "RX Invalid\n");
+
+ return simple_read_from_buffer(buf, count, ppos, debugfs_buf, pos);
+}
+
+static ssize_t rx_valid_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return lane_detect_write(file, buf, count, ppos);
+}
+
+static ssize_t err_inj_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dwc_pcie_rasdes_priv *pdata = file->private_data;
+ struct dw_pcie *pci = pdata->pci;
+ struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+ u32 val, counter, vc_num, err_group, type_mask;
+ int val_diff = 0;
+ char *kern_buf;
+
+ err_group = err_inj_list[pdata->idx].err_inj_group;
+ type_mask = err_inj_type_mask[err_group];
+
+ kern_buf = memdup_user_nul(buf, count);
+ if (IS_ERR(kern_buf))
+ return PTR_ERR(kern_buf);
+
+ if (err_group == 4) {
+ val = sscanf(kern_buf, "%u %d %u", &counter, &val_diff, &vc_num);
+ if ((val != 3) || (val_diff < -4095 || val_diff > 4095)) {
+ kfree(kern_buf);
+ return -EINVAL;
+ }
+ } else if (err_group == 1) {
+ val = sscanf(kern_buf, "%u %d", &counter, &val_diff);
+ if ((val != 2) || (val_diff < -4095 || val_diff > 4095)) {
+ kfree(kern_buf);
+ return -EINVAL;
+ }
+ } else {
+ val = kstrtou32(kern_buf, 0, &counter);
+ if (val) {
+ kfree(kern_buf);
+ return val;
+ }
+ }
+
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + ERR_INJ0_OFF + (0x4 * err_group));
+ val &= ~(type_mask | EINJ_COUNT);
+ val |= ((err_inj_list[pdata->idx].err_inj_type << EINJ_TYPE_SHIFT) & type_mask);
+ val |= FIELD_PREP(EINJ_COUNT, counter);
+
+ if (err_group == 1 || err_group == 4) {
+ val &= ~(EINJ_VAL_DIFF);
+ val |= FIELD_PREP(EINJ_VAL_DIFF, val_diff);
+ }
+ if (err_group == 4) {
+ val &= ~(EINJ_VC_NUM);
+ val |= FIELD_PREP(EINJ_VC_NUM, vc_num);
+ }
+
+ dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + ERR_INJ0_OFF + (0x4 * err_group), val);
+ dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + ERR_INJ_ENABLE_REG, (0x1 << err_group));
+
+ kfree(kern_buf);
+ return count;
+}
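+
+/*
+ * Usage sketch (the directory name depends on dev_name() of the controller;
+ * the values below are illustrative only):
+ *
+ *   # groups 0, 2, 3 and 5 take only an injection count
+ *   echo 1 > /sys/kernel/debug/dwc_pcie_<dev>/rasdes_err_inj/tx_lcrc
+ *   # group 1 additionally takes a sequence number difference (-4095..4095)
+ *   echo "1 -2" > /sys/kernel/debug/dwc_pcie_<dev>/rasdes_err_inj/tlp_err_seq
+ *   # group 4 additionally takes the difference and a VC number
+ *   echo "1 8 0" > /sys/kernel/debug/dwc_pcie_<dev>/rasdes_err_inj/posted_tlp_hdr
+ */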
+
+static void set_event_number(struct dwc_pcie_rasdes_priv *pdata,
+ struct dw_pcie *pci, struct dwc_pcie_rasdes_info *rinfo)
+{
+ u32 val;
+
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG);
+ val &= ~EVENT_COUNTER_ENABLE;
+ val &= ~(EVENT_COUNTER_GROUP_SELECT | EVENT_COUNTER_EVENT_SELECT);
+ val |= FIELD_PREP(EVENT_COUNTER_GROUP_SELECT, event_list[pdata->idx].group_no);
+ val |= FIELD_PREP(EVENT_COUNTER_EVENT_SELECT, event_list[pdata->idx].event_no);
+ dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG, val);
+}
+
+static ssize_t counter_enable_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dwc_pcie_rasdes_priv *pdata = file->private_data;
+ struct dw_pcie *pci = pdata->pci;
+ struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+ char debugfs_buf[DWC_DEBUGFS_BUF_MAX];
+ ssize_t pos;
+ u32 val;
+
+ mutex_lock(&rinfo->reg_event_lock);
+ set_event_number(pdata, pci, rinfo);
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG);
+ mutex_unlock(&rinfo->reg_event_lock);
+ val = FIELD_GET(EVENT_COUNTER_STATUS, val);
+ if (val)
+ pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Counter Enabled\n");
+ else
+ pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Counter Disabled\n");
+
+ return simple_read_from_buffer(buf, count, ppos, debugfs_buf, pos);
+}
+
+static ssize_t counter_enable_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dwc_pcie_rasdes_priv *pdata = file->private_data;
+ struct dw_pcie *pci = pdata->pci;
+ struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+ u32 val, enable;
+
+ val = kstrtou32_from_user(buf, count, 0, &enable);
+ if (val)
+ return val;
+
+ mutex_lock(&rinfo->reg_event_lock);
+ set_event_number(pdata, pci, rinfo);
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG);
+ if (enable)
+ val |= FIELD_PREP(EVENT_COUNTER_ENABLE, PER_EVENT_ON);
+ else
+ val |= FIELD_PREP(EVENT_COUNTER_ENABLE, PER_EVENT_OFF);
+
+ dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG, val);
+
+ /*
+ * While enabling the counter, always read the status back to check
+ * whether it actually got enabled. Return an error if it did not, to
+ * let users know that the counter is not supported on the platform.
+ */
+ if (enable) {
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset +
+ RAS_DES_EVENT_COUNTER_CTRL_REG);
+ if (!FIELD_GET(EVENT_COUNTER_STATUS, val)) {
+ mutex_unlock(&rinfo->reg_event_lock);
+ return -EOPNOTSUPP;
+ }
+ }
+
+ mutex_unlock(&rinfo->reg_event_lock);
+
+ return count;
+}
+
+static ssize_t counter_lane_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dwc_pcie_rasdes_priv *pdata = file->private_data;
+ struct dw_pcie *pci = pdata->pci;
+ struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+ char debugfs_buf[DWC_DEBUGFS_BUF_MAX];
+ ssize_t pos;
+ u32 val;
+
+ mutex_lock(&rinfo->reg_event_lock);
+ set_event_number(pdata, pci, rinfo);
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG);
+ mutex_unlock(&rinfo->reg_event_lock);
+ val = FIELD_GET(EVENT_COUNTER_LANE_SELECT, val);
+ pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Lane: %d\n", val);
+
+ return simple_read_from_buffer(buf, count, ppos, debugfs_buf, pos);
+}
+
+static ssize_t counter_lane_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dwc_pcie_rasdes_priv *pdata = file->private_data;
+ struct dw_pcie *pci = pdata->pci;
+ struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+ u32 val, lane;
+
+ val = kstrtou32_from_user(buf, count, 0, &lane);
+ if (val)
+ return val;
+
+ mutex_lock(&rinfo->reg_event_lock);
+ set_event_number(pdata, pci, rinfo);
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG);
+ val &= ~(EVENT_COUNTER_LANE_SELECT);
+ val |= FIELD_PREP(EVENT_COUNTER_LANE_SELECT, lane);
+ dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG, val);
+ mutex_unlock(&rinfo->reg_event_lock);
+
+ return count;
+}
+
+static ssize_t counter_value_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dwc_pcie_rasdes_priv *pdata = file->private_data;
+ struct dw_pcie *pci = pdata->pci;
+ struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+ char debugfs_buf[DWC_DEBUGFS_BUF_MAX];
+ ssize_t pos;
+ u32 val;
+
+ mutex_lock(&rinfo->reg_event_lock);
+ set_event_number(pdata, pci, rinfo);
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_DATA_REG);
+ mutex_unlock(&rinfo->reg_event_lock);
+ pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Counter value: %d\n", val);
+
+ return simple_read_from_buffer(buf, count, ppos, debugfs_buf, pos);
+}
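+
+/*
+ * Usage sketch for the per-event statistics files created below (paths are
+ * illustrative):
+ *
+ *   echo 1 > .../rasdes_event_counter/lcrc_err/counter_enable
+ *   cat .../rasdes_event_counter/lcrc_err/counter_value
+ *
+ * Writing 1 may fail with -EOPNOTSUPP if the platform does not implement
+ * the counter; lane_select is only exposed for the per-lane groups 0 and 4.
+ */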
+
+static const char *ltssm_status_string(enum dw_pcie_ltssm ltssm)
+{
+ const char *str;
+
+ switch (ltssm) {
+#define DW_PCIE_LTSSM_NAME(n) case n: str = #n; break
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_DETECT_QUIET);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_DETECT_ACT);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_POLL_ACTIVE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_POLL_COMPLIANCE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_POLL_CONFIG);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_PRE_DETECT_QUIET);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_DETECT_WAIT);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_CFG_LINKWD_START);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_CFG_LINKWD_ACEPT);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_CFG_LANENUM_WAI);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_CFG_LANENUM_ACEPT);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_CFG_COMPLETE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_CFG_IDLE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_LOCK);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_SPEED);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_RCVRCFG);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_IDLE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L0);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L0S);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L123_SEND_EIDLE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L1_IDLE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L2_IDLE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L2_WAKE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_DISABLED_ENTRY);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_DISABLED_IDLE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_DISABLED);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_LPBK_ENTRY);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_LPBK_ACTIVE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_LPBK_EXIT);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_LPBK_EXIT_TIMEOUT);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_HOT_RESET_ENTRY);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_HOT_RESET);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_EQ0);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_EQ1);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_EQ2);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_EQ3);
+ default:
+ str = "DW_PCIE_LTSSM_UNKNOWN";
+ break;
+ }
+
+ return str + strlen("DW_PCIE_LTSSM_");
+}
+
+static int ltssm_status_show(struct seq_file *s, void *v)
+{
+ struct dw_pcie *pci = s->private;
+ enum dw_pcie_ltssm val;
+
+ val = dw_pcie_get_ltssm(pci);
+ seq_printf(s, "%s (0x%02x)\n", ltssm_status_string(val), val);
+
+ return 0;
+}
+
+static int ltssm_status_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ltssm_status_show, inode->i_private);
+}
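+
+/*
+ * Reading the "ltssm_status" file prints the current LTSSM state with the
+ * "DW_PCIE_LTSSM_" prefix stripped, e.g. "L0 (0x11)" once the link has
+ * trained (assuming the usual DWC encoding where DW_PCIE_LTSSM_L0 == 0x11).
+ */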
+
+#define dwc_debugfs_create(name) \
+debugfs_create_file(#name, 0644, rasdes_debug, pci, \
+ &dbg_ ## name ## _fops)
+
+#define DWC_DEBUGFS_FOPS(name) \
+static const struct file_operations dbg_ ## name ## _fops = { \
+ .open = simple_open, \
+ .read = name ## _read, \
+ .write = name ## _write \
+}
+
+DWC_DEBUGFS_FOPS(lane_detect);
+DWC_DEBUGFS_FOPS(rx_valid);
+
+static const struct file_operations dwc_pcie_err_inj_ops = {
+ .open = simple_open,
+ .write = err_inj_write,
+};
+
+static const struct file_operations dwc_pcie_counter_enable_ops = {
+ .open = simple_open,
+ .read = counter_enable_read,
+ .write = counter_enable_write,
+};
+
+static const struct file_operations dwc_pcie_counter_lane_ops = {
+ .open = simple_open,
+ .read = counter_lane_read,
+ .write = counter_lane_write,
+};
+
+static const struct file_operations dwc_pcie_counter_value_ops = {
+ .open = simple_open,
+ .read = counter_value_read,
+};
+
+static const struct file_operations dwc_pcie_ltssm_status_ops = {
+ .open = ltssm_status_open,
+ .read = seq_read,
+};
+
+static void dwc_pcie_rasdes_debugfs_deinit(struct dw_pcie *pci)
+{
+ struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+
+ mutex_destroy(&rinfo->reg_event_lock);
+}
+
+static int dwc_pcie_rasdes_debugfs_init(struct dw_pcie *pci, struct dentry *dir)
+{
+ struct dentry *rasdes_debug, *rasdes_err_inj;
+ struct dentry *rasdes_event_counter, *rasdes_events;
+ struct dwc_pcie_rasdes_info *rasdes_info;
+ struct dwc_pcie_rasdes_priv *priv_tmp;
+ struct device *dev = pci->dev;
+ int ras_cap, i, ret;
+
+ /*
+ * If a given SoC has no RAS DES capability, the following call is
+ * bound to return an error. Propagating that error would break some
+ * existing platforms, so return 0 here instead, as this is not
+ * necessarily an error.
+ */
+ ras_cap = dw_pcie_find_rasdes_capability(pci);
+ if (!ras_cap) {
+ dev_dbg(dev, "no RAS DES capability available\n");
+ return 0;
+ }
+
+ rasdes_info = devm_kzalloc(dev, sizeof(*rasdes_info), GFP_KERNEL);
+ if (!rasdes_info)
+ return -ENOMEM;
+
+ /* Create subdirectories for Debug, Error Injection, Statistics. */
+ rasdes_debug = debugfs_create_dir("rasdes_debug", dir);
+ rasdes_err_inj = debugfs_create_dir("rasdes_err_inj", dir);
+ rasdes_event_counter = debugfs_create_dir("rasdes_event_counter", dir);
+
+ mutex_init(&rasdes_info->reg_event_lock);
+ rasdes_info->ras_cap_offset = ras_cap;
+ pci->debugfs->rasdes_info = rasdes_info;
+
+ /* Create debugfs files for Debug subdirectory. */
+ dwc_debugfs_create(lane_detect);
+ dwc_debugfs_create(rx_valid);
+
+ /* Create debugfs files for Error Injection subdirectory. */
+ for (i = 0; i < ARRAY_SIZE(err_inj_list); i++) {
+ priv_tmp = devm_kzalloc(dev, sizeof(*priv_tmp), GFP_KERNEL);
+ if (!priv_tmp) {
+ ret = -ENOMEM;
+ goto err_deinit;
+ }
+
+ priv_tmp->idx = i;
+ priv_tmp->pci = pci;
+ debugfs_create_file(err_inj_list[i].name, 0200, rasdes_err_inj, priv_tmp,
+ &dwc_pcie_err_inj_ops);
+ }
+
+ /* Create debugfs files for Statistical Counter subdirectory. */
+ for (i = 0; i < ARRAY_SIZE(event_list); i++) {
+ priv_tmp = devm_kzalloc(dev, sizeof(*priv_tmp), GFP_KERNEL);
+ if (!priv_tmp) {
+ ret = -ENOMEM;
+ goto err_deinit;
+ }
+
+ priv_tmp->idx = i;
+ priv_tmp->pci = pci;
+ rasdes_events = debugfs_create_dir(event_list[i].name, rasdes_event_counter);
+ if (event_list[i].group_no == 0 || event_list[i].group_no == 4) {
+ debugfs_create_file("lane_select", 0644, rasdes_events,
+ priv_tmp, &dwc_pcie_counter_lane_ops);
+ }
+ debugfs_create_file("counter_value", 0444, rasdes_events, priv_tmp,
+ &dwc_pcie_counter_value_ops);
+ debugfs_create_file("counter_enable", 0644, rasdes_events, priv_tmp,
+ &dwc_pcie_counter_enable_ops);
+ }
+
+ return 0;
+
+err_deinit:
+ dwc_pcie_rasdes_debugfs_deinit(pci);
+ return ret;
+}
+
+static void dwc_pcie_ltssm_debugfs_init(struct dw_pcie *pci, struct dentry *dir)
+{
+ debugfs_create_file("ltssm_status", 0444, dir, pci,
+ &dwc_pcie_ltssm_status_ops);
+}
+
+static int dw_pcie_ptm_check_capability(void *drvdata)
+{
+ struct dw_pcie *pci = drvdata;
+
+ pci->ptm_vsec_offset = dw_pcie_find_ptm_capability(pci);
+
+ return pci->ptm_vsec_offset;
+}
+
+static int dw_pcie_ptm_context_update_write(void *drvdata, u8 mode)
+{
+ struct dw_pcie *pci = drvdata;
+ u32 val;
+
+ if (mode == PCIE_PTM_CONTEXT_UPDATE_AUTO) {
+ val = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL);
+ val |= PTM_REQ_AUTO_UPDATE_ENABLED;
+ dw_pcie_writel_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL, val);
+ } else if (mode == PCIE_PTM_CONTEXT_UPDATE_MANUAL) {
+ val = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL);
+ val &= ~PTM_REQ_AUTO_UPDATE_ENABLED;
+ val |= PTM_REQ_START_UPDATE;
+ dw_pcie_writel_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL, val);
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int dw_pcie_ptm_context_update_read(void *drvdata, u8 *mode)
+{
+ struct dw_pcie *pci = drvdata;
+ u32 val;
+
+ val = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL);
+ if (FIELD_GET(PTM_REQ_AUTO_UPDATE_ENABLED, val))
+ *mode = PCIE_PTM_CONTEXT_UPDATE_AUTO;
+ else
+ /*
+ * PTM_REQ_START_UPDATE is a self-clearing register bit. So if
+ * PTM_REQ_AUTO_UPDATE_ENABLED is not set, it implies that manual
+ * update is in use.
+ */
+ *mode = PCIE_PTM_CONTEXT_UPDATE_MANUAL;
+
+ return 0;
+}
+
+static int dw_pcie_ptm_context_valid_write(void *drvdata, bool valid)
+{
+ struct dw_pcie *pci = drvdata;
+ u32 val;
+
+ if (valid) {
+ val = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL);
+ val |= PTM_RES_CCONTEXT_VALID;
+ dw_pcie_writel_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL, val);
+ } else {
+ val = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL);
+ val &= ~PTM_RES_CCONTEXT_VALID;
+ dw_pcie_writel_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL, val);
+ }
+
+ return 0;
+}
+
+static int dw_pcie_ptm_context_valid_read(void *drvdata, bool *valid)
+{
+ struct dw_pcie *pci = drvdata;
+ u32 val;
+
+ val = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL);
+ *valid = !!FIELD_GET(PTM_RES_CCONTEXT_VALID, val);
+
+ return 0;
+}
+
+static int dw_pcie_ptm_local_clock_read(void *drvdata, u64 *clock)
+{
+ struct dw_pcie *pci = drvdata;
+ u32 msb, lsb;
+
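+ /*
+ * Re-read the MSB after reading the LSB to guard against a 32-bit
+ * rollover between the two reads; retry until the MSB is stable.
+ */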
+ do {
+ msb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_LOCAL_MSB);
+ lsb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_LOCAL_LSB);
+ } while (msb != dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_LOCAL_MSB));
+
+ *clock = ((u64) msb) << 32 | lsb;
+
+ return 0;
+}
+
+static int dw_pcie_ptm_master_clock_read(void *drvdata, u64 *clock)
+{
+ struct dw_pcie *pci = drvdata;
+ u32 msb, lsb;
+
+ do {
+ msb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_MASTER_MSB);
+ lsb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_MASTER_LSB);
+ } while (msb != dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_MASTER_MSB));
+
+ *clock = ((u64) msb) << 32 | lsb;
+
+ return 0;
+}
+
+static int dw_pcie_ptm_t1_read(void *drvdata, u64 *clock)
+{
+ struct dw_pcie *pci = drvdata;
+ u32 msb, lsb;
+
+ do {
+ msb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T1_T2_MSB);
+ lsb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T1_T2_LSB);
+ } while (msb != dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T1_T2_MSB));
+
+ *clock = ((u64) msb) << 32 | lsb;
+
+ return 0;
+}
+
+static int dw_pcie_ptm_t2_read(void *drvdata, u64 *clock)
+{
+ struct dw_pcie *pci = drvdata;
+ u32 msb, lsb;
+
+ do {
+ msb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T1_T2_MSB);
+ lsb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T1_T2_LSB);
+ } while (msb != dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T1_T2_MSB));
+
+ *clock = ((u64) msb) << 32 | lsb;
+
+ return 0;
+}
+
+static int dw_pcie_ptm_t3_read(void *drvdata, u64 *clock)
+{
+ struct dw_pcie *pci = drvdata;
+ u32 msb, lsb;
+
+ do {
+ msb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T3_T4_MSB);
+ lsb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T3_T4_LSB);
+ } while (msb != dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T3_T4_MSB));
+
+ *clock = ((u64) msb) << 32 | lsb;
+
+ return 0;
+}
+
+static int dw_pcie_ptm_t4_read(void *drvdata, u64 *clock)
+{
+ struct dw_pcie *pci = drvdata;
+ u32 msb, lsb;
+
+ do {
+ msb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T3_T4_MSB);
+ lsb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T3_T4_LSB);
+ } while (msb != dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T3_T4_MSB));
+
+ *clock = ((u64) msb) << 32 | lsb;
+
+ return 0;
+}
+
+static bool dw_pcie_ptm_context_update_visible(void *drvdata)
+{
+ struct dw_pcie *pci = drvdata;
+
+ return pci->mode == DW_PCIE_EP_TYPE;
+}
+
+static bool dw_pcie_ptm_context_valid_visible(void *drvdata)
+{
+ struct dw_pcie *pci = drvdata;
+
+ return pci->mode == DW_PCIE_RC_TYPE;
+}
+
+static bool dw_pcie_ptm_local_clock_visible(void *drvdata)
+{
+ /* PTM local clock is always visible */
+ return true;
+}
+
+static bool dw_pcie_ptm_master_clock_visible(void *drvdata)
+{
+ struct dw_pcie *pci = drvdata;
+
+ return pci->mode == DW_PCIE_EP_TYPE;
+}
+
+static bool dw_pcie_ptm_t1_visible(void *drvdata)
+{
+ struct dw_pcie *pci = drvdata;
+
+ return pci->mode == DW_PCIE_EP_TYPE;
+}
+
+static bool dw_pcie_ptm_t2_visible(void *drvdata)
+{
+ struct dw_pcie *pci = drvdata;
+
+ return pci->mode == DW_PCIE_RC_TYPE;
+}
+
+static bool dw_pcie_ptm_t3_visible(void *drvdata)
+{
+ struct dw_pcie *pci = drvdata;
+
+ return pci->mode == DW_PCIE_RC_TYPE;
+}
+
+static bool dw_pcie_ptm_t4_visible(void *drvdata)
+{
+ struct dw_pcie *pci = drvdata;
+
+ return pci->mode == DW_PCIE_EP_TYPE;
+}
+
+static const struct pcie_ptm_ops dw_pcie_ptm_ops = {
+ .check_capability = dw_pcie_ptm_check_capability,
+ .context_update_write = dw_pcie_ptm_context_update_write,
+ .context_update_read = dw_pcie_ptm_context_update_read,
+ .context_valid_write = dw_pcie_ptm_context_valid_write,
+ .context_valid_read = dw_pcie_ptm_context_valid_read,
+ .local_clock_read = dw_pcie_ptm_local_clock_read,
+ .master_clock_read = dw_pcie_ptm_master_clock_read,
+ .t1_read = dw_pcie_ptm_t1_read,
+ .t2_read = dw_pcie_ptm_t2_read,
+ .t3_read = dw_pcie_ptm_t3_read,
+ .t4_read = dw_pcie_ptm_t4_read,
+ .context_update_visible = dw_pcie_ptm_context_update_visible,
+ .context_valid_visible = dw_pcie_ptm_context_valid_visible,
+ .local_clock_visible = dw_pcie_ptm_local_clock_visible,
+ .master_clock_visible = dw_pcie_ptm_master_clock_visible,
+ .t1_visible = dw_pcie_ptm_t1_visible,
+ .t2_visible = dw_pcie_ptm_t2_visible,
+ .t3_visible = dw_pcie_ptm_t3_visible,
+ .t4_visible = dw_pcie_ptm_t4_visible,
+};
+
+void dwc_pcie_debugfs_deinit(struct dw_pcie *pci)
+{
+ if (!pci->debugfs)
+ return;
+
+ pcie_ptm_destroy_debugfs(pci->ptm_debugfs);
+ dwc_pcie_rasdes_debugfs_deinit(pci);
+ debugfs_remove_recursive(pci->debugfs->debug_dir);
+}
+
+void dwc_pcie_debugfs_init(struct dw_pcie *pci, enum dw_pcie_device_mode mode)
+{
+ char dirname[DWC_DEBUGFS_BUF_MAX];
+ struct device *dev = pci->dev;
+ struct debugfs_info *debugfs;
+ struct dentry *dir;
+ int err;
+
+ /* Create main directory for each platform driver. */
+ snprintf(dirname, DWC_DEBUGFS_BUF_MAX, "dwc_pcie_%s", dev_name(dev));
+ dir = debugfs_create_dir(dirname, NULL);
+ debugfs = devm_kzalloc(dev, sizeof(*debugfs), GFP_KERNEL);
+ if (!debugfs)
+ return;
+
+ debugfs->debug_dir = dir;
+ pci->debugfs = debugfs;
+ err = dwc_pcie_rasdes_debugfs_init(pci, dir);
+ if (err)
+ dev_err(dev, "failed to initialize RAS DES debugfs, err=%d\n",
+ err);
+
+ dwc_pcie_ltssm_debugfs_init(pci, dir);
+
+ pci->mode = mode;
+ pci->ptm_debugfs = pcie_ptm_create_debugfs(pci->dev, pci,
+ &dw_pcie_ptm_ops);
+}
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index d06654895eba..19571ac2b961 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -6,6 +6,8 @@
* Author: Kishon Vijay Abraham I <kishon@ti.com>
*/
+#include <linux/align.h>
+#include <linux/bitfield.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -13,22 +15,14 @@
#include <linux/pci-epc.h>
#include <linux/pci-epf.h>
-void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
-{
- struct pci_epc *epc = ep->epc;
-
- pci_epc_linkup(epc);
-}
-EXPORT_SYMBOL_GPL(dw_pcie_ep_linkup);
-
-void dw_pcie_ep_init_notify(struct dw_pcie_ep *ep)
-{
- struct pci_epc *epc = ep->epc;
-
- pci_epc_init_notify(epc);
-}
-EXPORT_SYMBOL_GPL(dw_pcie_ep_init_notify);
-
+/**
+ * dw_pcie_ep_get_func_from_ep - Get the struct dw_pcie_ep_func corresponding to
+ * the endpoint function
+ * @ep: DWC EP device
+ * @func_no: Function number of the endpoint device
+ *
+ * Return: struct dw_pcie_ep_func if success, NULL otherwise.
+ */
struct dw_pcie_ep_func *
dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep *ep, u8 func_no)
{
@@ -42,36 +36,28 @@ dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep *ep, u8 func_no)
return NULL;
}
-static unsigned int dw_pcie_ep_func_select(struct dw_pcie_ep *ep, u8 func_no)
-{
- unsigned int func_offset = 0;
-
- if (ep->ops->func_conf_select)
- func_offset = ep->ops->func_conf_select(ep, func_no);
-
- return func_offset;
-}
-
static void __dw_pcie_ep_reset_bar(struct dw_pcie *pci, u8 func_no,
enum pci_barno bar, int flags)
{
- u32 reg;
- unsigned int func_offset = 0;
struct dw_pcie_ep *ep = &pci->ep;
+ u32 reg;
- func_offset = dw_pcie_ep_func_select(ep, func_no);
-
- reg = func_offset + PCI_BASE_ADDRESS_0 + (4 * bar);
+ reg = PCI_BASE_ADDRESS_0 + (4 * bar);
dw_pcie_dbi_ro_wr_en(pci);
- dw_pcie_writel_dbi2(pci, reg, 0x0);
- dw_pcie_writel_dbi(pci, reg, 0x0);
+ dw_pcie_ep_writel_dbi2(ep, func_no, reg, 0x0);
+ dw_pcie_ep_writel_dbi(ep, func_no, reg, 0x0);
if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) {
- dw_pcie_writel_dbi2(pci, reg + 4, 0x0);
- dw_pcie_writel_dbi(pci, reg + 4, 0x0);
+ dw_pcie_ep_writel_dbi2(ep, func_no, reg + 4, 0x0);
+ dw_pcie_ep_writel_dbi(ep, func_no, reg + 4, 0x0);
}
dw_pcie_dbi_ro_wr_dis(pci);
}
+/**
+ * dw_pcie_ep_reset_bar - Reset endpoint BAR
+ * @pci: DWC PCI device
+ * @bar: BAR number of the endpoint
+ */
void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
{
u8 func_no, funcs;
@@ -83,77 +69,79 @@ void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_reset_bar);
-static u8 __dw_pcie_ep_find_next_cap(struct dw_pcie_ep *ep, u8 func_no,
- u8 cap_ptr, u8 cap)
+static u8 dw_pcie_ep_find_capability(struct dw_pcie_ep *ep, u8 func_no, u8 cap)
{
- struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- unsigned int func_offset = 0;
- u8 cap_id, next_cap_ptr;
- u16 reg;
-
- if (!cap_ptr)
- return 0;
-
- func_offset = dw_pcie_ep_func_select(ep, func_no);
+ return PCI_FIND_NEXT_CAP(dw_pcie_ep_read_cfg, PCI_CAPABILITY_LIST,
+ cap, ep, func_no);
+}
- reg = dw_pcie_readw_dbi(pci, func_offset + cap_ptr);
- cap_id = (reg & 0x00ff);
+/**
+ * dw_pcie_ep_hide_ext_capability - Hide a capability from the linked list
+ * @pci: DWC PCI device
+ * @prev_cap: Capability preceding the capability that should be hidden
+ * @cap: Capability that should be hidden
+ *
+ * Return: 0 if success, errno otherwise.
+ */
+int dw_pcie_ep_hide_ext_capability(struct dw_pcie *pci, u8 prev_cap, u8 cap)
+{
+ u16 prev_cap_offset, cap_offset;
+ u32 prev_cap_header, cap_header;
- if (cap_id > PCI_CAP_ID_MAX)
- return 0;
+ prev_cap_offset = dw_pcie_find_ext_capability(pci, prev_cap);
+ if (!prev_cap_offset)
+ return -EINVAL;
- if (cap_id == cap)
- return cap_ptr;
+ prev_cap_header = dw_pcie_readl_dbi(pci, prev_cap_offset);
+ cap_offset = PCI_EXT_CAP_NEXT(prev_cap_header);
+ cap_header = dw_pcie_readl_dbi(pci, cap_offset);
- next_cap_ptr = (reg & 0xff00) >> 8;
- return __dw_pcie_ep_find_next_cap(ep, func_no, next_cap_ptr, cap);
-}
+ /* cap must immediately follow prev_cap. */
+ if (PCI_EXT_CAP_ID(cap_header) != cap)
+ return -EINVAL;
-static u8 dw_pcie_ep_find_capability(struct dw_pcie_ep *ep, u8 func_no, u8 cap)
-{
- struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- unsigned int func_offset = 0;
- u8 next_cap_ptr;
- u16 reg;
+ /* Clear next ptr. */
+ prev_cap_header &= ~GENMASK(31, 20);
- func_offset = dw_pcie_ep_func_select(ep, func_no);
+ /* Set next ptr to next ptr of cap. */
+ prev_cap_header |= cap_header & GENMASK(31, 20);
- reg = dw_pcie_readw_dbi(pci, func_offset + PCI_CAPABILITY_LIST);
- next_cap_ptr = (reg & 0x00ff);
+ dw_pcie_dbi_ro_wr_en(pci);
+ dw_pcie_writel_dbi(pci, prev_cap_offset, prev_cap_header);
+ dw_pcie_dbi_ro_wr_dis(pci);
- return __dw_pcie_ep_find_next_cap(ep, func_no, next_cap_ptr, cap);
+ return 0;
}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_hide_ext_capability);
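+
+/*
+ * For reference: a PCIe extended capability header packs the capability ID
+ * in bits 15:0, the version in bits 19:16 and the next-capability offset in
+ * bits 31:20, which is why splicing GENMASK(31, 20) above is enough to
+ * unlink @cap from the list.
+ */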
static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_header *hdr)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- unsigned int func_offset = 0;
-
- func_offset = dw_pcie_ep_func_select(ep, func_no);
dw_pcie_dbi_ro_wr_en(pci);
- dw_pcie_writew_dbi(pci, func_offset + PCI_VENDOR_ID, hdr->vendorid);
- dw_pcie_writew_dbi(pci, func_offset + PCI_DEVICE_ID, hdr->deviceid);
- dw_pcie_writeb_dbi(pci, func_offset + PCI_REVISION_ID, hdr->revid);
- dw_pcie_writeb_dbi(pci, func_offset + PCI_CLASS_PROG, hdr->progif_code);
- dw_pcie_writew_dbi(pci, func_offset + PCI_CLASS_DEVICE,
- hdr->subclass_code | hdr->baseclass_code << 8);
- dw_pcie_writeb_dbi(pci, func_offset + PCI_CACHE_LINE_SIZE,
- hdr->cache_line_size);
- dw_pcie_writew_dbi(pci, func_offset + PCI_SUBSYSTEM_VENDOR_ID,
- hdr->subsys_vendor_id);
- dw_pcie_writew_dbi(pci, func_offset + PCI_SUBSYSTEM_ID, hdr->subsys_id);
- dw_pcie_writeb_dbi(pci, func_offset + PCI_INTERRUPT_PIN,
- hdr->interrupt_pin);
+ dw_pcie_ep_writew_dbi(ep, func_no, PCI_VENDOR_ID, hdr->vendorid);
+ dw_pcie_ep_writew_dbi(ep, func_no, PCI_DEVICE_ID, hdr->deviceid);
+ dw_pcie_ep_writeb_dbi(ep, func_no, PCI_REVISION_ID, hdr->revid);
+ dw_pcie_ep_writeb_dbi(ep, func_no, PCI_CLASS_PROG, hdr->progif_code);
+ dw_pcie_ep_writew_dbi(ep, func_no, PCI_CLASS_DEVICE,
+ hdr->subclass_code | hdr->baseclass_code << 8);
+ dw_pcie_ep_writeb_dbi(ep, func_no, PCI_CACHE_LINE_SIZE,
+ hdr->cache_line_size);
+ dw_pcie_ep_writew_dbi(ep, func_no, PCI_SUBSYSTEM_VENDOR_ID,
+ hdr->subsys_vendor_id);
+ dw_pcie_ep_writew_dbi(ep, func_no, PCI_SUBSYSTEM_ID, hdr->subsys_id);
+ dw_pcie_ep_writeb_dbi(ep, func_no, PCI_INTERRUPT_PIN,
+ hdr->interrupt_pin);
dw_pcie_dbi_ro_wr_dis(pci);
return 0;
}
static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no, int type,
- dma_addr_t cpu_addr, enum pci_barno bar)
+ dma_addr_t parent_bus_addr, enum pci_barno bar,
+ size_t size)
{
int ret;
u32 free_win;
@@ -162,7 +150,7 @@ static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no, int type,
if (!ep->bar_to_atu[bar])
free_win = find_first_zero_bit(ep->ib_window_map, pci->num_ib_windows);
else
- free_win = ep->bar_to_atu[bar];
+ free_win = ep->bar_to_atu[bar] - 1;
if (free_win >= pci->num_ib_windows) {
dev_err(pci->dev, "No free inbound window\n");
@@ -170,21 +158,24 @@ static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no, int type,
}
ret = dw_pcie_prog_ep_inbound_atu(pci, func_no, free_win, type,
- cpu_addr, bar);
+ parent_bus_addr, bar, size);
if (ret < 0) {
dev_err(pci->dev, "Failed to program IB window\n");
return ret;
}
- ep->bar_to_atu[bar] = free_win;
+ /*
+ * Always increment free_win before the assignment, since the value 0 is
+ * used to identify an unallocated mapping.
+ */
+ ep->bar_to_atu[bar] = free_win + 1;
set_bit(free_win, ep->ib_window_map);
return 0;
}
-static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, u8 func_no,
- phys_addr_t phys_addr,
- u64 pci_addr, size_t size)
+static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep,
+ struct dw_pcie_ob_atu_cfg *atu)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
u32 free_win;
@@ -196,13 +187,13 @@ static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, u8 func_no,
return -EINVAL;
}
- ret = dw_pcie_prog_ep_outbound_atu(pci, func_no, free_win, PCIE_ATU_TYPE_MEM,
- phys_addr, pci_addr, size);
+ atu->index = free_win;
+ ret = dw_pcie_prog_outbound_atu(pci, atu);
if (ret)
return ret;
set_bit(free_win, ep->ob_window_map);
- ep->outbound_addr[free_win] = phys_addr;
+ ep->outbound_addr[free_win] = atu->parent_bus_addr;
return 0;
}
@@ -213,7 +204,10 @@ static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
enum pci_barno bar = epf_bar->barno;
- u32 atu_index = ep->bar_to_atu[bar];
+ u32 atu_index = ep->bar_to_atu[bar] - 1;
+
+ if (!ep->bar_to_atu[bar])
+ return;
__dw_pcie_ep_reset_bar(pci, func_no, bar, epf_bar->flags);
@@ -223,6 +217,125 @@ static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
ep->bar_to_atu[bar] = 0;
}
+static unsigned int dw_pcie_ep_get_rebar_offset(struct dw_pcie *pci,
+ enum pci_barno bar)
+{
+ u32 reg, bar_index;
+ unsigned int offset, nbars;
+ int i;
+
+ offset = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR);
+ if (!offset)
+ return offset;
+
+ reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
+ nbars = FIELD_GET(PCI_REBAR_CTRL_NBAR_MASK, reg);
+
+ for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL) {
+ reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
+ bar_index = FIELD_GET(PCI_REBAR_CTRL_BAR_IDX, reg);
+ if (bar_index == bar)
+ return offset;
+ }
+
+ return 0;
+}
+
+static int dw_pcie_ep_set_bar_resizable(struct dw_pcie_ep *ep, u8 func_no,
+ struct pci_epf_bar *epf_bar)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ enum pci_barno bar = epf_bar->barno;
+ size_t size = epf_bar->size;
+ int flags = epf_bar->flags;
+ u32 reg = PCI_BASE_ADDRESS_0 + (4 * bar);
+ unsigned int rebar_offset;
+ u32 rebar_cap, rebar_ctrl;
+ int ret;
+
+ rebar_offset = dw_pcie_ep_get_rebar_offset(pci, bar);
+ if (!rebar_offset)
+ return -EINVAL;
+
+ ret = pci_epc_bar_size_to_rebar_cap(size, &rebar_cap);
+ if (ret)
+ return ret;
+
+ dw_pcie_dbi_ro_wr_en(pci);
+
+ /*
+ * A BAR mask should not be written for a resizable BAR. The BAR mask
+ * is automatically derived by the controller every time the "selected
+ * size" bits are updated, see "Figure 3-26 Resizable BAR Example for
+ * 32-bit Memory BAR0" in DWC EP databook 5.96a. We simply need to write
+ * BIT(0) to set the BAR enable bit.
+ */
+ dw_pcie_ep_writel_dbi2(ep, func_no, reg, BIT(0));
+ dw_pcie_ep_writel_dbi(ep, func_no, reg, flags);
+
+ if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) {
+ dw_pcie_ep_writel_dbi2(ep, func_no, reg + 4, 0);
+ dw_pcie_ep_writel_dbi(ep, func_no, reg + 4, 0);
+ }
+
+ /*
+ * Bits 31:0 in PCI_REBAR_CAP define "supported sizes" bits for sizes
+ * 1 MB to 128 TB. Bits 31:16 in PCI_REBAR_CTRL define "supported sizes"
+ * bits for sizes 256 TB to 8 EB. Disallow sizes 256 TB to 8 EB.
+ */
+ rebar_ctrl = dw_pcie_readl_dbi(pci, rebar_offset + PCI_REBAR_CTRL);
+ rebar_ctrl &= ~GENMASK(31, 16);
+ dw_pcie_writel_dbi(pci, rebar_offset + PCI_REBAR_CTRL, rebar_ctrl);
+
+ /*
+ * The "selected size" (bits 13:8) in PCI_REBAR_CTRL are automatically
+ * updated when writing PCI_REBAR_CAP, see "Figure 3-26 Resizable BAR
+ * Example for 32-bit Memory BAR0" in DWC EP databook 5.96a.
+ */
+ dw_pcie_writel_dbi(pci, rebar_offset + PCI_REBAR_CAP, rebar_cap);
+
+ dw_pcie_dbi_ro_wr_dis(pci);
+
+ return 0;
+}
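+
+/*
+ * For reference, in the standard Resizable BAR encoding bit (4 + n) of the
+ * "supported sizes" field corresponds to a size of 2^n MB, so e.g. a 2 MB
+ * BAR maps to BIT(5). Writing that single bit to PCI_REBAR_CAP lets the
+ * controller derive the BAR mask by itself, per the comments above.
+ */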
+
+static int dw_pcie_ep_set_bar_programmable(struct dw_pcie_ep *ep, u8 func_no,
+ struct pci_epf_bar *epf_bar)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ enum pci_barno bar = epf_bar->barno;
+ size_t size = epf_bar->size;
+ int flags = epf_bar->flags;
+ u32 reg = PCI_BASE_ADDRESS_0 + (4 * bar);
+
+ dw_pcie_dbi_ro_wr_en(pci);
+
+ dw_pcie_ep_writel_dbi2(ep, func_no, reg, lower_32_bits(size - 1));
+ dw_pcie_ep_writel_dbi(ep, func_no, reg, flags);
+
+ if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) {
+ dw_pcie_ep_writel_dbi2(ep, func_no, reg + 4, upper_32_bits(size - 1));
+ dw_pcie_ep_writel_dbi(ep, func_no, reg + 4, 0);
+ }
+
+ dw_pcie_dbi_ro_wr_dis(pci);
+
+ return 0;
+}
+
+static enum pci_epc_bar_type dw_pcie_ep_get_bar_type(struct dw_pcie_ep *ep,
+ enum pci_barno bar)
+{
+ const struct pci_epc_features *epc_features;
+
+ if (!ep->ops->get_features)
+ return BAR_PROGRAMMABLE;
+
+ epc_features = ep->ops->get_features(ep);
+
+ return epc_features->bar[bar].type;
+}
+
static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_bar *epf_bar)
{
@@ -230,39 +343,77 @@ static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
enum pci_barno bar = epf_bar->barno;
size_t size = epf_bar->size;
+ enum pci_epc_bar_type bar_type;
int flags = epf_bar->flags;
- unsigned int func_offset = 0;
int ret, type;
- u32 reg;
- func_offset = dw_pcie_ep_func_select(ep, func_no);
+ /*
+ * DWC does not allow BAR pairs to overlap, e.g. you cannot combine BARs
+ * 1 and 2 to form a 64-bit BAR.
+ */
+ if ((flags & PCI_BASE_ADDRESS_MEM_TYPE_64) && (bar & 1))
+ return -EINVAL;
+
+ /*
+ * Certain EPF drivers dynamically change the physical address of a BAR
+ * (i.e. they call set_bar() twice, without ever calling clear_bar(), as
+ * calling clear_bar() would clear the BAR's PCI address assigned by the
+ * host).
+ */
+ if (ep->epf_bar[bar]) {
+ /*
+ * We can only dynamically change a BAR if the new BAR size and
+ * BAR flags do not differ from the existing configuration.
+ */
+ if (ep->epf_bar[bar]->barno != bar ||
+ ep->epf_bar[bar]->size != size ||
+ ep->epf_bar[bar]->flags != flags)
+ return -EINVAL;
+
+ /*
+ * When dynamically changing a BAR, skip writing the BAR reg, as
+ * that would clear the BAR's PCI address assigned by the host.
+ */
+ goto config_atu;
+ }
+
+ bar_type = dw_pcie_ep_get_bar_type(ep, bar);
+ switch (bar_type) {
+ case BAR_FIXED:
+ /*
+ * There is no need to write a BAR mask for a fixed BAR (except
+ * to write 1 to the LSB of the BAR mask register, to enable the
+ * BAR). Write the BAR mask regardless. (The fixed bits in the
+ * BAR mask register will be read-only anyway.)
+ */
+ fallthrough;
+ case BAR_PROGRAMMABLE:
+ ret = dw_pcie_ep_set_bar_programmable(ep, func_no, epf_bar);
+ break;
+ case BAR_RESIZABLE:
+ ret = dw_pcie_ep_set_bar_resizable(ep, func_no, epf_bar);
+ break;
+ default:
+ ret = -EINVAL;
+ dev_err(pci->dev, "Invalid BAR type\n");
+ break;
+ }
- reg = PCI_BASE_ADDRESS_0 + (4 * bar) + func_offset;
+ if (ret)
+ return ret;
+config_atu:
if (!(flags & PCI_BASE_ADDRESS_SPACE))
type = PCIE_ATU_TYPE_MEM;
else
type = PCIE_ATU_TYPE_IO;
- ret = dw_pcie_ep_inbound_atu(ep, func_no, type, epf_bar->phys_addr, bar);
+ ret = dw_pcie_ep_inbound_atu(ep, func_no, type, epf_bar->phys_addr, bar,
+ size);
if (ret)
return ret;
- if (ep->epf_bar[bar])
- return 0;
-
- dw_pcie_dbi_ro_wr_en(pci);
-
- dw_pcie_writel_dbi2(pci, reg, lower_32_bits(size - 1));
- dw_pcie_writel_dbi(pci, reg, flags);
-
- if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) {
- dw_pcie_writel_dbi2(pci, reg + 4, upper_32_bits(size - 1));
- dw_pcie_writel_dbi(pci, reg + 4, 0);
- }
-
ep->epf_bar[bar] = epf_bar;
- dw_pcie_dbi_ro_wr_dis(pci);
return 0;
}
@@ -273,7 +424,7 @@ static int dw_pcie_find_index(struct dw_pcie_ep *ep, phys_addr_t addr,
u32 index;
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- for (index = 0; index < pci->num_ob_windows; index++) {
+ for_each_set_bit(index, ep->ob_window_map, pci->num_ob_windows) {
if (ep->outbound_addr[index] != addr)
continue;
*atu_index = index;
@@ -283,6 +434,20 @@ static int dw_pcie_find_index(struct dw_pcie_ep *ep, phys_addr_t addr,
return -EINVAL;
}
+static u64 dw_pcie_ep_align_addr(struct pci_epc *epc, u64 pci_addr,
+ size_t *pci_size, size_t *offset)
+{
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ u64 mask = pci->region_align - 1;
+ size_t ofst = pci_addr & mask;
+
+ *pci_size = ALIGN(ofst + *pci_size, epc->mem->window.page_size);
+ *offset = ofst;
+
+ return pci_addr & ~mask;
+}
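+
+/*
+ * Worked example (values assumed for illustration): with region_align =
+ * SZ_64K and a 4 KiB window page size, pci_addr = 0x12345 and *pci_size = 4
+ * yield offset = 0x2345, a mapped size of ALIGN(0x2345 + 4, SZ_4K) = 0x3000,
+ * and an aligned base address of 0x10000.
+ */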
+
static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
phys_addr_t addr)
{
@@ -291,10 +456,12 @@ static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- ret = dw_pcie_find_index(ep, addr, &atu_index);
+ ret = dw_pcie_find_index(ep, addr - pci->parent_bus_offset,
+ &atu_index);
if (ret < 0)
return;
+ ep->outbound_addr[atu_index] = 0;
dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_OB, atu_index);
clear_bit(atu_index, ep->ob_window_map);
}
@@ -305,8 +472,14 @@ static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
int ret;
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
-
- ret = dw_pcie_ep_outbound_atu(ep, func_no, addr, pci_addr, size);
+ struct dw_pcie_ob_atu_cfg atu = { 0 };
+
+ atu.func_no = func_no;
+ atu.type = PCIE_ATU_TYPE_MEM;
+ atu.parent_bus_addr = addr - pci->parent_bus_offset;
+ atu.pci_addr = pci_addr;
+ atu.size = size;
+ ret = dw_pcie_ep_outbound_atu(ep, &atu);
if (ret) {
dev_err(pci->dev, "Failed to enable address\n");
return ret;
@@ -318,48 +491,42 @@ static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
static int dw_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
- struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- u32 val, reg;
- unsigned int func_offset = 0;
struct dw_pcie_ep_func *ep_func;
+ u32 val, reg;
ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
if (!ep_func || !ep_func->msi_cap)
return -EINVAL;
- func_offset = dw_pcie_ep_func_select(ep, func_no);
-
- reg = ep_func->msi_cap + func_offset + PCI_MSI_FLAGS;
- val = dw_pcie_readw_dbi(pci, reg);
+ reg = ep_func->msi_cap + PCI_MSI_FLAGS;
+ val = dw_pcie_ep_readw_dbi(ep, func_no, reg);
if (!(val & PCI_MSI_FLAGS_ENABLE))
return -EINVAL;
- val = (val & PCI_MSI_FLAGS_QSIZE) >> 4;
+ val = FIELD_GET(PCI_MSI_FLAGS_QSIZE, val);
- return val;
+ return 1 << val;
}
static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
- u8 interrupts)
+ u8 nr_irqs)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- u32 val, reg;
- unsigned int func_offset = 0;
struct dw_pcie_ep_func *ep_func;
+ u8 mmc = order_base_2(nr_irqs);
+ u32 val, reg;
ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
if (!ep_func || !ep_func->msi_cap)
return -EINVAL;
- func_offset = dw_pcie_ep_func_select(ep, func_no);
-
- reg = ep_func->msi_cap + func_offset + PCI_MSI_FLAGS;
- val = dw_pcie_readw_dbi(pci, reg);
+ reg = ep_func->msi_cap + PCI_MSI_FLAGS;
+ val = dw_pcie_ep_readw_dbi(ep, func_no, reg);
val &= ~PCI_MSI_FLAGS_QMASK;
- val |= (interrupts << 1) & PCI_MSI_FLAGS_QMASK;
+ val |= FIELD_PREP(PCI_MSI_FLAGS_QMASK, mmc);
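+ /* MMC encodes log2 of the vector count, e.g. nr_irqs = 16 -> mmc = 4 */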
dw_pcie_dbi_ro_wr_en(pci);
- dw_pcie_writew_dbi(pci, reg, val);
+ dw_pcie_ep_writew_dbi(ep, func_no, reg, val);
dw_pcie_dbi_ro_wr_dis(pci);
return 0;
@@ -368,35 +535,30 @@ static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
static int dw_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
- struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- u32 val, reg;
- unsigned int func_offset = 0;
struct dw_pcie_ep_func *ep_func;
+ u32 val, reg;
ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
if (!ep_func || !ep_func->msix_cap)
return -EINVAL;
- func_offset = dw_pcie_ep_func_select(ep, func_no);
-
- reg = ep_func->msix_cap + func_offset + PCI_MSIX_FLAGS;
- val = dw_pcie_readw_dbi(pci, reg);
+ reg = ep_func->msix_cap + PCI_MSIX_FLAGS;
+ val = dw_pcie_ep_readw_dbi(ep, func_no, reg);
if (!(val & PCI_MSIX_FLAGS_ENABLE))
return -EINVAL;
val &= PCI_MSIX_FLAGS_QSIZE;
- return val;
+ return val + 1;
}
static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
- u16 interrupts, enum pci_barno bir, u32 offset)
+ u16 nr_irqs, enum pci_barno bir, u32 offset)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- u32 val, reg;
- unsigned int func_offset = 0;
struct dw_pcie_ep_func *ep_func;
+ u32 val, reg;
ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
if (!ep_func || !ep_func->msix_cap)
@@ -404,21 +566,19 @@ static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
dw_pcie_dbi_ro_wr_en(pci);
- func_offset = dw_pcie_ep_func_select(ep, func_no);
-
- reg = ep_func->msix_cap + func_offset + PCI_MSIX_FLAGS;
- val = dw_pcie_readw_dbi(pci, reg);
+ reg = ep_func->msix_cap + PCI_MSIX_FLAGS;
+ val = dw_pcie_ep_readw_dbi(ep, func_no, reg);
val &= ~PCI_MSIX_FLAGS_QSIZE;
- val |= interrupts;
+ val |= nr_irqs - 1; /* encoded as N-1 */
dw_pcie_writew_dbi(pci, reg, val);
- reg = ep_func->msix_cap + func_offset + PCI_MSIX_TABLE;
+ reg = ep_func->msix_cap + PCI_MSIX_TABLE;
val = offset | bir;
- dw_pcie_writel_dbi(pci, reg, val);
+ dw_pcie_ep_writel_dbi(ep, func_no, reg, val);
- reg = ep_func->msix_cap + func_offset + PCI_MSIX_PBA;
- val = (offset + (interrupts * PCI_MSIX_ENTRY_SIZE)) | bir;
- dw_pcie_writel_dbi(pci, reg, val);
+ reg = ep_func->msix_cap + PCI_MSIX_PBA;
+ val = (offset + (nr_irqs * PCI_MSIX_ENTRY_SIZE)) | bir;
+ dw_pcie_ep_writel_dbi(ep, func_no, reg, val);
dw_pcie_dbi_ro_wr_dis(pci);
@@ -426,7 +586,7 @@ static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
}
static int dw_pcie_ep_raise_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
- enum pci_epc_irq_type type, u16 interrupt_num)
+ unsigned int type, u16 interrupt_num)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
@@ -467,6 +627,7 @@ static const struct pci_epc_ops epc_ops = {
.write_header = dw_pcie_ep_write_header,
.set_bar = dw_pcie_ep_set_bar,
.clear_bar = dw_pcie_ep_clear_bar,
+ .align_addr = dw_pcie_ep_align_addr,
.map_addr = dw_pcie_ep_map_addr,
.unmap_addr = dw_pcie_ep_unmap_addr,
.set_msi = dw_pcie_ep_set_msi,
@@ -479,62 +640,74 @@ static const struct pci_epc_ops epc_ops = {
.get_features = dw_pcie_ep_get_features,
};
-int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no)
+/**
+ * dw_pcie_ep_raise_intx_irq - Raise INTx IRQ to the host
+ * @ep: DWC EP device
+ * @func_no: Function number of the endpoint
+ *
+ * Return: 0 if success, errno otherwise.
+ */
+int dw_pcie_ep_raise_intx_irq(struct dw_pcie_ep *ep, u8 func_no)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
struct device *dev = pci->dev;
- dev_err(dev, "EP cannot trigger legacy IRQs\n");
+ dev_err(dev, "EP cannot raise INTX IRQs\n");
return -EINVAL;
}
-EXPORT_SYMBOL_GPL(dw_pcie_ep_raise_legacy_irq);
+EXPORT_SYMBOL_GPL(dw_pcie_ep_raise_intx_irq);
+/**
+ * dw_pcie_ep_raise_msi_irq - Raise MSI IRQ to the host
+ * @ep: DWC EP device
+ * @func_no: Function number of the endpoint
+ * @interrupt_num: Interrupt number to be raised
+ *
+ * Return: 0 if success, errno otherwise.
+ */
int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
u8 interrupt_num)
{
- struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ u32 msg_addr_lower, msg_addr_upper, reg;
struct dw_pcie_ep_func *ep_func;
struct pci_epc *epc = ep->epc;
- unsigned int aligned_offset;
- unsigned int func_offset = 0;
+ size_t map_size = sizeof(u32);
+ size_t offset;
u16 msg_ctrl, msg_data;
- u32 msg_addr_lower, msg_addr_upper, reg;
- u64 msg_addr;
bool has_upper;
+ u64 msg_addr;
int ret;
ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
if (!ep_func || !ep_func->msi_cap)
return -EINVAL;
- func_offset = dw_pcie_ep_func_select(ep, func_no);
-
/* Raise MSI per the PCI Local Bus Specification Revision 3.0, 6.8.1. */
- reg = ep_func->msi_cap + func_offset + PCI_MSI_FLAGS;
- msg_ctrl = dw_pcie_readw_dbi(pci, reg);
+ reg = ep_func->msi_cap + PCI_MSI_FLAGS;
+ msg_ctrl = dw_pcie_ep_readw_dbi(ep, func_no, reg);
has_upper = !!(msg_ctrl & PCI_MSI_FLAGS_64BIT);
- reg = ep_func->msi_cap + func_offset + PCI_MSI_ADDRESS_LO;
- msg_addr_lower = dw_pcie_readl_dbi(pci, reg);
+ reg = ep_func->msi_cap + PCI_MSI_ADDRESS_LO;
+ msg_addr_lower = dw_pcie_ep_readl_dbi(ep, func_no, reg);
if (has_upper) {
- reg = ep_func->msi_cap + func_offset + PCI_MSI_ADDRESS_HI;
- msg_addr_upper = dw_pcie_readl_dbi(pci, reg);
- reg = ep_func->msi_cap + func_offset + PCI_MSI_DATA_64;
- msg_data = dw_pcie_readw_dbi(pci, reg);
+ reg = ep_func->msi_cap + PCI_MSI_ADDRESS_HI;
+ msg_addr_upper = dw_pcie_ep_readl_dbi(ep, func_no, reg);
+ reg = ep_func->msi_cap + PCI_MSI_DATA_64;
+ msg_data = dw_pcie_ep_readw_dbi(ep, func_no, reg);
} else {
msg_addr_upper = 0;
- reg = ep_func->msi_cap + func_offset + PCI_MSI_DATA_32;
- msg_data = dw_pcie_readw_dbi(pci, reg);
+ reg = ep_func->msi_cap + PCI_MSI_DATA_32;
+ msg_data = dw_pcie_ep_readw_dbi(ep, func_no, reg);
}
- aligned_offset = msg_addr_lower & (epc->mem->window.page_size - 1);
- msg_addr = ((u64)msg_addr_upper) << 32 |
- (msg_addr_lower & ~aligned_offset);
+ msg_addr = ((u64)msg_addr_upper) << 32 | msg_addr_lower;
+
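+ /*
+ * The host-assigned MSI address is not necessarily aligned to the
+ * outbound ATU granularity, so map an aligned window and write the
+ * message data at the offset within it.
+ */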
+ msg_addr = dw_pcie_ep_align_addr(epc, msg_addr, &map_size, &offset);
ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr,
- epc->mem->window.page_size);
+ map_size);
if (ret)
return ret;
- writel(msg_data | (interrupt_num - 1), ep->msi_mem + aligned_offset);
+ writel(msg_data | (interrupt_num - 1), ep->msi_mem + offset);
dw_pcie_ep_unmap_addr(epc, func_no, 0, ep->msi_mem_phys);
@@ -542,6 +715,15 @@ int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_raise_msi_irq);
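[Editor's note: for context, a minimal sketch of how an endpoint function driver ends up in this path. my_epf_raise_doorbell() is a hypothetical helper; the EPC core dispatches pci_epc_raise_irq() to the controller's .raise_irq callback, which in turn calls dw_pcie_ep_raise_msi_irq() for PCI_IRQ_MSI:

	static int my_epf_raise_doorbell(struct pci_epf *epf)
	{
		/* Ends up in dw_pcie_ep_raise_msi_irq() for MSI vector 1 */
		return pci_epc_raise_irq(epf->epc, epf->func_no, epf->vfunc_no,
					 PCI_IRQ_MSI, 1);
	}
]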
+/**
+ * dw_pcie_ep_raise_msix_irq_doorbell - Raise MSI-X to the host using Doorbell
+ * method
+ * @ep: DWC EP device
+ * @func_no: Function number of the endpoint device
+ * @interrupt_num: Interrupt number to be raised
+ *
+ * Return: 0 if success, errno otherwise.
+ */
int dw_pcie_ep_raise_msix_irq_doorbell(struct dw_pcie_ep *ep, u8 func_no,
u16 interrupt_num)
{
@@ -561,16 +743,24 @@ int dw_pcie_ep_raise_msix_irq_doorbell(struct dw_pcie_ep *ep, u8 func_no,
return 0;
}
+/**
+ * dw_pcie_ep_raise_msix_irq - Raise MSI-X to the host
+ * @ep: DWC EP device
+ * @func_no: Function number of the endpoint device
+ * @interrupt_num: Interrupt number to be raised
+ *
+ * Return: 0 if success, errno otherwise.
+ */
int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
u16 interrupt_num)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- struct dw_pcie_ep_func *ep_func;
struct pci_epf_msix_tbl *msix_tbl;
+ struct dw_pcie_ep_func *ep_func;
struct pci_epc *epc = ep->epc;
- unsigned int func_offset = 0;
+ size_t map_size = sizeof(u32);
+ size_t offset;
u32 reg, msg_data, vec_ctrl;
- unsigned int aligned_offset;
u32 tbl_offset;
u64 msg_addr;
int ret;
@@ -580,11 +770,9 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
if (!ep_func || !ep_func->msix_cap)
return -EINVAL;
- func_offset = dw_pcie_ep_func_select(ep, func_no);
-
- reg = ep_func->msix_cap + func_offset + PCI_MSIX_TABLE;
- tbl_offset = dw_pcie_readl_dbi(pci, reg);
- bir = (tbl_offset & PCI_MSIX_TABLE_BIR);
+ reg = ep_func->msix_cap + PCI_MSIX_TABLE;
+ tbl_offset = dw_pcie_ep_readl_dbi(ep, func_no, reg);
+ bir = FIELD_GET(PCI_MSIX_TABLE_BIR, tbl_offset);
tbl_offset &= PCI_MSIX_TABLE_OFFSET;
msix_tbl = ep->epf_bar[bir]->addr + tbl_offset;
@@ -597,55 +785,125 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
return -EPERM;
}
- aligned_offset = msg_addr & (epc->mem->window.page_size - 1);
+ msg_addr = dw_pcie_ep_align_addr(epc, msg_addr, &map_size, &offset);
ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr,
- epc->mem->window.page_size);
+ map_size);
if (ret)
return ret;
- writel(msg_data, ep->msi_mem + aligned_offset);
+ writel(msg_data, ep->msi_mem + offset);
dw_pcie_ep_unmap_addr(epc, func_no, 0, ep->msi_mem_phys);
return 0;
}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_raise_msix_irq);
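[Editor's note: both the MSI and MSI-X paths above rely on dw_pcie_ep_align_addr() to split the message address into an iATU-mappable base plus a window offset. A minimal sketch of that split, assuming alignment to the controller's region granularity (the real helper uses pci->region_align):

	static u64 align_addr_sketch(u64 pci_addr, u64 align,
				     size_t *map_size, size_t *offset)
	{
		*offset = pci_addr & (align - 1);
		*map_size = ALIGN(*offset + *map_size, align);

		return pci_addr & ~(align - 1);
	}
]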
+
+/**
+ * dw_pcie_ep_cleanup - Cleanup DWC EP resources after fundamental reset
+ * @ep: DWC EP device
+ *
+ * Cleans up the DWC EP specific resources (e.g. eDMA) after a fundamental
+ * reset such as PERST#. Note that this API is only applicable to drivers
+ * supporting PERST# or other methods of fundamental reset.
+ */
+void dw_pcie_ep_cleanup(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ dwc_pcie_debugfs_deinit(pci);
+ dw_pcie_edma_remove(pci);
+}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_cleanup);
-void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
+/**
+ * dw_pcie_ep_deinit - Deinitialize the endpoint device
+ * @ep: DWC EP device
+ *
+ * Deinitialize the endpoint device. The EPC device is not destroyed here,
+ * since that is taken care of by devres.
+ */
+void dw_pcie_ep_deinit(struct dw_pcie_ep *ep)
{
struct pci_epc *epc = ep->epc;
+ dw_pcie_ep_cleanup(ep);
+
pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem,
epc->mem->window.page_size);
pci_epc_mem_exit(epc);
}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_deinit);
-static unsigned int dw_pcie_ep_find_ext_capability(struct dw_pcie *pci, int cap)
+static void dw_pcie_ep_init_non_sticky_registers(struct dw_pcie *pci)
{
- u32 header;
- int pos = PCI_CFG_SPACE_SIZE;
+ struct dw_pcie_ep *ep = &pci->ep;
+ unsigned int offset;
+ unsigned int nbars;
+ enum pci_barno bar;
+ u32 reg, i, val;
- while (pos) {
- header = dw_pcie_readl_dbi(pci, pos);
- if (PCI_EXT_CAP_ID(header) == cap)
- return pos;
+ offset = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR);
- pos = PCI_EXT_CAP_NEXT(header);
- if (!pos)
- break;
+ dw_pcie_dbi_ro_wr_en(pci);
+
+ if (offset) {
+ reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
+ nbars = FIELD_GET(PCI_REBAR_CTRL_NBAR_MASK, reg);
+
+ /*
+	 * PCIe r6.0, sec 7.8.6.2 requires us to support at least one
+ * size in the range from 1 MB to 512 GB. Advertise support
+ * for 1 MB BAR size only.
+ *
+ * For a BAR that has been configured via dw_pcie_ep_set_bar(),
+ * advertise support for only that size instead.
+ */
+ for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL) {
+ /*
+ * While the RESBAR_CAP_REG_* fields are sticky, the
+ * RESBAR_CTRL_REG_BAR_SIZE field is non-sticky (it is
+ * sticky in certain versions of DWC PCIe, but not all).
+ *
+ * RESBAR_CTRL_REG_BAR_SIZE is updated automatically by
+ * the controller when RESBAR_CAP_REG is written, which
+ * is why RESBAR_CAP_REG is written here.
+ */
+ val = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
+ bar = FIELD_GET(PCI_REBAR_CTRL_BAR_IDX, val);
+ if (ep->epf_bar[bar])
+ pci_epc_bar_size_to_rebar_cap(ep->epf_bar[bar]->size, &val);
+ else
+ val = BIT(4);
+
+ dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, val);
+ }
}
- return 0;
+ dw_pcie_setup(pci);
+ dw_pcie_dbi_ro_wr_dis(pci);
}
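[Editor's note: the BIT(4) default above encodes the 1 MB size. In the Resizable BAR capability, bit n of the size mask advertises support for 2^(n + 16) bytes, so bit 4 is 1 MB, bit 5 is 2 MB, and so on. A sketch of the size-to-capability mapping assumed here; the framework helper pci_epc_bar_size_to_rebar_cap() used above performs the equivalent conversion:

	static u32 rebar_cap_for_size(u64 size)
	{
		int order = max(ilog2(roundup_pow_of_two(size)), 20);

		return BIT(order - 16);	/* bit 4 == 1 MB, bit 5 == 2 MB, ... */
	}
]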
-int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep)
+/**
+ * dw_pcie_ep_init_registers - Initialize DWC EP specific registers
+ * @ep: DWC EP device
+ *
+ * Initialize the registers (CSRs) specific to DWC EP. This API should be called
+ * only when the endpoint receives an active refclk (either from host or
+ * generated locally).
+ */
+int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- unsigned int offset, ptm_cap_base;
- unsigned int nbars;
+ struct dw_pcie_ep_func *ep_func;
+ struct device *dev = pci->dev;
+ struct pci_epc *epc = ep->epc;
+ u32 ptm_cap_base, reg;
u8 hdr_type;
- u32 reg;
- int i;
+ u8 func_no;
+ void *addr;
+ int ret;
hdr_type = dw_pcie_readb_dbi(pci, PCI_HEADER_TYPE) &
PCI_HEADER_TYPE_MASK;
@@ -656,20 +914,61 @@ int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep)
return -EIO;
}
- offset = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR);
- ptm_cap_base = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_PTM);
+ dw_pcie_version_detect(pci);
- dw_pcie_dbi_ro_wr_en(pci);
+ dw_pcie_iatu_detect(pci);
- if (offset) {
- reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
- nbars = (reg & PCI_REBAR_CTRL_NBAR_MASK) >>
- PCI_REBAR_CTRL_NBAR_SHIFT;
+ ret = dw_pcie_edma_detect(pci);
+ if (ret)
+ return ret;
+
+ ret = -ENOMEM;
+ if (!ep->ib_window_map) {
+ ep->ib_window_map = devm_bitmap_zalloc(dev, pci->num_ib_windows,
+ GFP_KERNEL);
+ if (!ep->ib_window_map)
+ goto err_remove_edma;
+ }
+
+ if (!ep->ob_window_map) {
+ ep->ob_window_map = devm_bitmap_zalloc(dev, pci->num_ob_windows,
+ GFP_KERNEL);
+ if (!ep->ob_window_map)
+ goto err_remove_edma;
+ }
+
+ if (!ep->outbound_addr) {
+ addr = devm_kcalloc(dev, pci->num_ob_windows, sizeof(phys_addr_t),
+ GFP_KERNEL);
+ if (!addr)
+ goto err_remove_edma;
+ ep->outbound_addr = addr;
+ }
+
+ for (func_no = 0; func_no < epc->max_functions; func_no++) {
+
+ ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
+ if (ep_func)
+ continue;
+
+ ep_func = devm_kzalloc(dev, sizeof(*ep_func), GFP_KERNEL);
+ if (!ep_func)
+ goto err_remove_edma;
+
+ ep_func->func_no = func_no;
+ ep_func->msi_cap = dw_pcie_ep_find_capability(ep, func_no,
+ PCI_CAP_ID_MSI);
+ ep_func->msix_cap = dw_pcie_ep_find_capability(ep, func_no,
+ PCI_CAP_ID_MSIX);
- for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL)
- dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, 0x0);
+ list_add_tail(&ep_func->list, &ep->func_list);
}
+ if (ep->ops->init)
+ ep->ops->init(ep);
+
+ ptm_cap_base = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_PTM);
+
/*
* PTM responder capability can be disabled only after disabling
* PTM root capability.
@@ -686,28 +985,65 @@ int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep)
dw_pcie_dbi_ro_wr_dis(pci);
}
- dw_pcie_setup(pci);
- dw_pcie_dbi_ro_wr_dis(pci);
+ dw_pcie_ep_init_non_sticky_registers(pci);
+
+ dwc_pcie_debugfs_init(pci, DW_PCIE_EP_TYPE);
return 0;
+
+err_remove_edma:
+ dw_pcie_edma_remove(pci);
+
+ return ret;
}
-EXPORT_SYMBOL_GPL(dw_pcie_ep_init_complete);
+EXPORT_SYMBOL_GPL(dw_pcie_ep_init_registers);
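[Editor's note: a sketch of the call order a glue driver is expected to follow with this split, assuming the refclk becomes stable between the two steps; the pcie-designware-plat.c hunk further down follows the same sequence:

	ret = dw_pcie_ep_init(ep);		/* allocate EPC, parse resources */
	/* ... wait for PERST# deassert / stable refclk ... */
	ret = dw_pcie_ep_init_registers(ep);	/* program non-sticky CSRs */
	pci_epc_init_notify(ep->epc);		/* let EPF drivers configure */
]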
-int dw_pcie_ep_init(struct dw_pcie_ep *ep)
+/**
+ * dw_pcie_ep_linkup - Notify EPF drivers about Link Up event
+ * @ep: DWC EP device
+ */
+void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
+{
+ struct pci_epc *epc = ep->epc;
+
+ pci_epc_linkup(epc);
+}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_linkup);
+
+/**
+ * dw_pcie_ep_linkdown - Notify EPF drivers about Link Down event
+ * @ep: DWC EP device
+ *
+ * Non-sticky registers are also reinitialized before the notification is sent
+ * to the EPF drivers, since they need to be programmed again before the link
+ * comes back up.
+ */
+void dw_pcie_ep_linkdown(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct pci_epc *epc = ep->epc;
+
+ /*
+	 * Reinitialize the non-sticky DWC registers, as they would have been
+	 * reset by Link Down. This is specifically needed for drivers that do
+	 * not support PERST#, since they have no other way to reinitialize the
+	 * registers before the link comes back up again.
+ */
+ dw_pcie_ep_init_non_sticky_registers(pci);
+
+ pci_epc_linkdown(epc);
+}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_linkdown);
+
+static int dw_pcie_ep_get_resources(struct dw_pcie_ep *ep)
{
- int ret;
- void *addr;
- u8 func_no;
- struct resource *res;
- struct pci_epc *epc;
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
struct device *dev = pci->dev;
struct platform_device *pdev = to_platform_device(dev);
struct device_node *np = dev->of_node;
- const struct pci_epc_features *epc_features;
- struct dw_pcie_ep_func *ep_func;
-
- INIT_LIST_HEAD(&ep->func_list);
+ struct pci_epc *epc = ep->epc;
+ struct resource *res;
+ int ret;
ret = dw_pcie_get_resources(pci);
if (ret)
@@ -720,25 +1056,37 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
ep->phys_base = res->start;
ep->addr_size = resource_size(res);
- dw_pcie_version_detect(pci);
+ /*
+ * artpec6_pcie_cpu_addr_fixup() uses ep->phys_base, so call
+ * dw_pcie_parent_bus_offset() after setting ep->phys_base.
+ */
+ pci->parent_bus_offset = dw_pcie_parent_bus_offset(pci, "addr_space",
+ ep->phys_base);
- dw_pcie_iatu_detect(pci);
+ ret = of_property_read_u8(np, "max-functions", &epc->max_functions);
+ if (ret < 0)
+ epc->max_functions = 1;
- ep->ib_window_map = devm_bitmap_zalloc(dev, pci->num_ib_windows,
- GFP_KERNEL);
- if (!ep->ib_window_map)
- return -ENOMEM;
+ return 0;
+}
- ep->ob_window_map = devm_bitmap_zalloc(dev, pci->num_ob_windows,
- GFP_KERNEL);
- if (!ep->ob_window_map)
- return -ENOMEM;
+/**
+ * dw_pcie_ep_init - Initialize the endpoint device
+ * @ep: DWC EP device
+ *
+ * Initialize the endpoint device. Allocate resources and create the EPC
+ * device with the endpoint framework.
+ *
+ * Return: 0 if success, errno otherwise.
+ */
+int dw_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+ int ret;
+ struct pci_epc *epc;
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct device *dev = pci->dev;
- addr = devm_kcalloc(dev, pci->num_ob_windows, sizeof(phys_addr_t),
- GFP_KERNEL);
- if (!addr)
- return -ENOMEM;
- ep->outbound_addr = addr;
+ INIT_LIST_HEAD(&ep->func_list);
epc = devm_pci_epc_create(dev, &epc_ops);
if (IS_ERR(epc)) {
@@ -749,26 +1097,12 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
ep->epc = epc;
epc_set_drvdata(epc, ep);
- ret = of_property_read_u8(np, "max-functions", &epc->max_functions);
- if (ret < 0)
- epc->max_functions = 1;
-
- for (func_no = 0; func_no < epc->max_functions; func_no++) {
- ep_func = devm_kzalloc(dev, sizeof(*ep_func), GFP_KERNEL);
- if (!ep_func)
- return -ENOMEM;
-
- ep_func->func_no = func_no;
- ep_func->msi_cap = dw_pcie_ep_find_capability(ep, func_no,
- PCI_CAP_ID_MSI);
- ep_func->msix_cap = dw_pcie_ep_find_capability(ep, func_no,
- PCI_CAP_ID_MSIX);
-
- list_add_tail(&ep_func->list, &ep->func_list);
- }
+ ret = dw_pcie_ep_get_resources(ep);
+ if (ret)
+ return ret;
- if (ep->ops->ep_init)
- ep->ops->ep_init(ep);
+ if (ep->ops->pre_init)
+ ep->ops->pre_init(ep);
ret = pci_epc_mem_init(epc, ep->phys_base, ep->addr_size,
ep->page_size);
@@ -785,22 +1119,8 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
goto err_exit_epc_mem;
}
- if (ep->ops->get_features) {
- epc_features = ep->ops->get_features(ep);
- if (epc_features->core_init_notifier)
- return 0;
- }
-
- ret = dw_pcie_ep_init_complete(ep);
- if (ret)
- goto err_free_epc_mem;
-
return 0;
-err_free_epc_mem:
- pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem,
- epc->mem->window.page_size);
-
err_exit_epc_mem:
pci_epc_mem_exit(epc);
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index 3ab6ae3712c4..372207c33a85 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -8,7 +8,10 @@
* Author: Jingoo Han <jg1.han@samsung.com>
*/
+#include <linux/align.h>
+#include <linux/iopoll.h>
#include <linux/irqchip/chained_irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/of_address.h>
@@ -16,39 +19,30 @@
#include <linux/pci_regs.h>
#include <linux/platform_device.h>
+#include "../../pci.h"
#include "pcie-designware.h"
static struct pci_ops dw_pcie_ops;
+static struct pci_ops dw_pcie_ecam_ops;
static struct pci_ops dw_child_pcie_ops;
-static void dw_msi_ack_irq(struct irq_data *d)
-{
- irq_chip_ack_parent(d);
-}
-
-static void dw_msi_mask_irq(struct irq_data *d)
-{
- pci_msi_mask_irq(d);
- irq_chip_mask_parent(d);
-}
-
-static void dw_msi_unmask_irq(struct irq_data *d)
-{
- pci_msi_unmask_irq(d);
- irq_chip_unmask_parent(d);
-}
-
-static struct irq_chip dw_pcie_msi_irq_chip = {
- .name = "PCI-MSI",
- .irq_ack = dw_msi_ack_irq,
- .irq_mask = dw_msi_mask_irq,
- .irq_unmask = dw_msi_unmask_irq,
-};
-
-static struct msi_domain_info dw_pcie_msi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
- .chip = &dw_pcie_msi_irq_chip,
+#define DW_PCIE_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_NO_AFFINITY | \
+ MSI_FLAG_PCI_MSI_MASK_PARENT)
+#define DW_PCIE_MSI_FLAGS_SUPPORTED (MSI_FLAG_MULTI_PCI_MSI | \
+ MSI_FLAG_PCI_MSIX | \
+ MSI_GENERIC_FLAGS_MASK)
+
+#define IS_256MB_ALIGNED(x) IS_ALIGNED(x, SZ_256M)
+
+static const struct msi_parent_ops dw_pcie_msi_parent_ops = {
+ .required_flags = DW_PCIE_MSI_FLAGS_REQUIRED,
+ .supported_flags = DW_PCIE_MSI_FLAGS_SUPPORTED,
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .chip_flags = MSI_CHIP_FLAG_SET_ACK,
+ .prefix = "DW-",
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
};
/* MSI int handler */
@@ -114,12 +108,6 @@ static void dw_pci_setup_msi_msg(struct irq_data *d, struct msi_msg *msg)
(int)d->hwirq, msg->address_hi, msg->address_lo);
}
-static int dw_pci_msi_set_affinity(struct irq_data *d,
- const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
static void dw_pci_bottom_mask(struct irq_data *d)
{
struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
@@ -175,7 +163,6 @@ static struct irq_chip dw_pci_msi_bottom_irq_chip = {
.name = "DWPCI-MSI",
.irq_ack = dw_pci_bottom_ack,
.irq_compose_msi_msg = dw_pci_setup_msi_msg,
- .irq_set_affinity = dw_pci_msi_set_affinity,
.irq_mask = dw_pci_bottom_mask,
.irq_unmask = dw_pci_bottom_unmask,
};
@@ -231,30 +218,24 @@ static const struct irq_domain_ops dw_pcie_msi_domain_ops = {
int dw_pcie_allocate_domains(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct fwnode_handle *fwnode = of_node_to_fwnode(pci->dev->of_node);
-
- pp->irq_domain = irq_domain_create_linear(fwnode, pp->num_vectors,
- &dw_pcie_msi_domain_ops, pp);
+ struct irq_domain_info info = {
+ .fwnode = dev_fwnode(pci->dev),
+ .ops = &dw_pcie_msi_domain_ops,
+ .size = pp->num_vectors,
+ .host_data = pp,
+ };
+
+ pp->irq_domain = msi_create_parent_irq_domain(&info, &dw_pcie_msi_parent_ops);
if (!pp->irq_domain) {
dev_err(pci->dev, "Failed to create IRQ domain\n");
return -ENOMEM;
}
- irq_domain_update_bus_token(pp->irq_domain, DOMAIN_BUS_NEXUS);
-
- pp->msi_domain = pci_msi_create_irq_domain(fwnode,
- &dw_pcie_msi_domain_info,
- pp->irq_domain);
- if (!pp->msi_domain) {
- dev_err(pci->dev, "Failed to create MSI domain\n");
- irq_domain_remove(pp->irq_domain);
- return -ENOMEM;
- }
-
return 0;
}
+EXPORT_SYMBOL_GPL(dw_pcie_allocate_domains);
-static void dw_pcie_free_msi(struct dw_pcie_rp *pp)
+void dw_pcie_free_msi(struct dw_pcie_rp *pp)
{
u32 ctrl;
@@ -264,22 +245,36 @@ static void dw_pcie_free_msi(struct dw_pcie_rp *pp)
NULL, NULL);
}
- irq_domain_remove(pp->msi_domain);
irq_domain_remove(pp->irq_domain);
}
+EXPORT_SYMBOL_GPL(dw_pcie_free_msi);
-static void dw_pcie_msi_init(struct dw_pcie_rp *pp)
+void dw_pcie_msi_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
u64 msi_target = (u64)pp->msi_data;
+ u32 ctrl, num_ctrls;
if (!pci_msi_enabled() || !pp->has_msi_ctrl)
return;
+ num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
+
+ /* Initialize IRQ Status array */
+ for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
+ dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK +
+ (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
+ pp->irq_mask[ctrl]);
+ dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_ENABLE +
+ (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
+ ~0);
+ }
+
/* Program the msi_data */
dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_LO, lower_32_bits(msi_target));
dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_HI, upper_32_bits(msi_target));
}
+EXPORT_SYMBOL_GPL(dw_pcie_msi_init);
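[Editor's note: each MSI controller bank serves 32 vectors through its own ENABLE/MASK/STATUS registers, spaced MSI_REG_CTRL_BLOCK_SIZE apart. A worked example of the loop above, assuming the maximum of 256 vectors:

	num_ctrls = 256 / MAX_MSI_IRQS_PER_CTRL;	/* 8 banks */
	/* bank n covers hwirqs [n * 32, n * 32 + 31] via the registers at
	 * PCIE_MSI_INTR0_* + n * MSI_REG_CTRL_BLOCK_SIZE */
]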
static int dw_pcie_parse_split_msi_irq(struct dw_pcie_rp *pp)
{
@@ -321,12 +316,12 @@ static int dw_pcie_parse_split_msi_irq(struct dw_pcie_rp *pp)
return 0;
}
-static int dw_pcie_msi_host_init(struct dw_pcie_rp *pp)
+int dw_pcie_msi_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct device *dev = pci->dev;
struct platform_device *pdev = to_platform_device(dev);
- u64 *msi_vaddr;
+ u64 *msi_vaddr = NULL;
int ret;
u32 ctrl, num_ctrls;
@@ -366,79 +361,241 @@ static int dw_pcie_msi_host_init(struct dw_pcie_rp *pp)
dw_chained_msi_isr, pp);
}
- ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
- if (ret)
- dev_warn(dev, "Failed to set DMA mask to 32-bit. Devices with only 32-bit MSI support may not work properly\n");
+ /*
+	 * Even though the iMSI-RX module supports 64-bit addresses, some
+	 * peripheral PCIe devices may lack 64-bit message support. In order
+	 * not to miss MSI TLPs from those devices, the MSI target address
+	 * has to be within the lowest 4GB.
+	 *
+	 * Note: until a better alternative is found, the reservation is done
+	 * by allocating from DMA-coherent memory with an artificially limited
+	 * mask.
+ */
+ ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+ if (!ret)
+ msi_vaddr = dmam_alloc_coherent(dev, sizeof(u64), &pp->msi_data,
+ GFP_KERNEL);
- msi_vaddr = dmam_alloc_coherent(dev, sizeof(u64), &pp->msi_data,
- GFP_KERNEL);
if (!msi_vaddr) {
- dev_err(dev, "Failed to alloc and map MSI data\n");
- dw_pcie_free_msi(pp);
- return -ENOMEM;
+ dev_warn(dev, "Failed to allocate 32-bit MSI address\n");
+ dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
+ msi_vaddr = dmam_alloc_coherent(dev, sizeof(u64), &pp->msi_data,
+ GFP_KERNEL);
+ if (!msi_vaddr) {
+ dev_err(dev, "Failed to allocate MSI address\n");
+ dw_pcie_free_msi(pp);
+ return -ENOMEM;
+ }
}
return 0;
}
+EXPORT_SYMBOL_GPL(dw_pcie_msi_host_init);
-int dw_pcie_host_init(struct dw_pcie_rp *pp)
+static void dw_pcie_host_request_msg_tlp_res(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct device *dev = pci->dev;
- struct device_node *np = dev->of_node;
- struct platform_device *pdev = to_platform_device(dev);
struct resource_entry *win;
- struct pci_host_bridge *bridge;
struct resource *res;
+
+ win = resource_list_first_type(&pp->bridge->windows, IORESOURCE_MEM);
+ if (win) {
+ res = devm_kzalloc(pci->dev, sizeof(*res), GFP_KERNEL);
+ if (!res)
+ return;
+
+ /*
+ * Allocate MSG TLP region of size 'region_align' at the end of
+ * the host bridge window.
+ */
+ res->start = win->res->end - pci->region_align + 1;
+ res->end = win->res->end;
+ res->name = "msg";
+ res->flags = win->res->flags | IORESOURCE_BUSY;
+
+ if (!devm_request_resource(pci->dev, win->res, res))
+ pp->msg_res = res;
+ }
+}
+
+static int dw_pcie_config_ecam_iatu(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct dw_pcie_ob_atu_cfg atu = {0};
+ resource_size_t bus_range_max;
+ struct resource_entry *bus;
int ret;
- raw_spin_lock_init(&pp->lock);
+ bus = resource_list_first_type(&pp->bridge->windows, IORESOURCE_BUS);
- ret = dw_pcie_get_resources(pci);
+ /*
+	 * The root bus under the host bridge doesn't require any iATU
+	 * configuration, as the DBI region is used to access its config space.
+	 * The bus immediately below the root bus needs a Type 0 iATU
+	 * configuration, and all remaining buses need a Type 1 configuration.
+ */
+ atu.index = 0;
+ atu.type = PCIE_ATU_TYPE_CFG0;
+ atu.parent_bus_addr = pp->cfg0_base + SZ_1M;
+	/* 1 MiB covers one bus: 32 devices * 8 functions * 4 KiB config space */
+ atu.size = SZ_1M;
+ atu.ctrl2 = PCIE_ATU_CFG_SHIFT_MODE_ENABLE;
+ ret = dw_pcie_prog_outbound_atu(pci, &atu);
if (ret)
return ret;
+ bus_range_max = resource_size(bus->res);
+
+ if (bus_range_max < 2)
+ return 0;
+
+ /* Configure remaining buses in type 1 iATU configuration */
+ atu.index = 1;
+ atu.type = PCIE_ATU_TYPE_CFG1;
+ atu.parent_bus_addr = pp->cfg0_base + SZ_2M;
+ atu.size = (SZ_1M * bus_range_max) - SZ_2M;
+ atu.ctrl2 = PCIE_ATU_CFG_SHIFT_MODE_ENABLE;
+
+ return dw_pcie_prog_outbound_atu(pci, &atu);
+}
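[Editor's note: the resulting config space layout, assuming cfg0_base is the start of the "config" window; 1 MiB per bus follows from 32 devices * 8 functions * 4 KiB of config space each:

	[cfg0_base + 0 MiB, +1 MiB)	bus 0 (root)	DBI, no iATU entry
	[cfg0_base + 1 MiB, +2 MiB)	bus 1		Type 0 CFG, iATU index 0
	[cfg0_base + 2 MiB, end)	buses 2..N	Type 1 CFG, iATU index 1
]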
+
+static int dw_pcie_create_ecam_window(struct dw_pcie_rp *pp, struct resource *res)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct device *dev = pci->dev;
+ struct resource_entry *bus;
+
+ bus = resource_list_first_type(&pp->bridge->windows, IORESOURCE_BUS);
+ if (!bus)
+ return -ENODEV;
+
+ pp->cfg = pci_ecam_create(dev, res, bus->res, &pci_generic_ecam_ops);
+ if (IS_ERR(pp->cfg))
+ return PTR_ERR(pp->cfg);
+
+ return 0;
+}
+
+static bool dw_pcie_ecam_enabled(struct dw_pcie_rp *pp, struct resource *config_res)
+{
+ struct resource *bus_range;
+ u64 nr_buses;
+
+ /* Vendor glue drivers may implement their own ECAM mechanism */
+ if (pp->native_ecam)
+ return false;
+
+ /*
+ * PCIe spec r6.0, sec 7.2.2 mandates the base address used for ECAM to
+ * be aligned on a 2^(n+20) byte boundary, where n is the number of bits
+ * used for representing 'bus' in BDF. Since the DWC cores always use 8
+ * bits for representing 'bus', the base address has to be aligned to
+ * 2^28 byte boundary, which is 256 MiB.
+ */
+ if (!IS_256MB_ALIGNED(config_res->start))
+ return false;
+
+ bus_range = resource_list_first_type(&pp->bridge->windows, IORESOURCE_BUS)->res;
+ if (!bus_range)
+ return false;
+
+ nr_buses = resource_size(config_res) >> PCIE_ECAM_BUS_SHIFT;
+
+ return nr_buses >= resource_size(bus_range);
+}
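[Editor's note: a worked example of this check. A 256 MiB "config" region at a 256 MiB-aligned base supports a full <0x00 0xff> bus-range, since

	nr_buses = SZ_256M >> PCIE_ECAM_BUS_SHIFT;	/* 256 MiB / 1 MiB = 256 */

whereas a 16 MiB region would only cover 16 buses and the driver would fall back to the iATU-based config accessors.]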
+
+static int dw_pcie_host_get_resources(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct device *dev = pci->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct resource_entry *win;
+ struct resource *res;
+ int ret;
+
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
- if (res) {
- pp->cfg0_size = resource_size(res);
- pp->cfg0_base = res->start;
+ if (!res) {
+ dev_err(dev, "Missing \"config\" reg space\n");
+ return -ENODEV;
+ }
+
+ pp->cfg0_size = resource_size(res);
+ pp->cfg0_base = res->start;
+
+ pp->ecam_enabled = dw_pcie_ecam_enabled(pp, res);
+ if (pp->ecam_enabled) {
+ ret = dw_pcie_create_ecam_window(pp, res);
+ if (ret)
+ return ret;
+ pp->bridge->ops = &dw_pcie_ecam_ops;
+ pp->bridge->sysdata = pp->cfg;
+ pp->cfg->priv = pp;
+ } else {
pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, res);
if (IS_ERR(pp->va_cfg0_base))
return PTR_ERR(pp->va_cfg0_base);
- } else {
- dev_err(dev, "Missing *config* reg space\n");
- return -ENODEV;
- }
- bridge = devm_pci_alloc_host_bridge(dev, 0);
- if (!bridge)
- return -ENOMEM;
+ /* Set default bus ops */
+ pp->bridge->ops = &dw_pcie_ops;
+ pp->bridge->child_ops = &dw_child_pcie_ops;
+ pp->bridge->sysdata = pp;
+ }
- pp->bridge = bridge;
+ ret = dw_pcie_get_resources(pci);
+ if (ret) {
+ if (pp->cfg)
+ pci_ecam_free(pp->cfg);
+ return ret;
+ }
/* Get the I/O range from DT */
- win = resource_list_first_type(&bridge->windows, IORESOURCE_IO);
+ win = resource_list_first_type(&pp->bridge->windows, IORESOURCE_IO);
if (win) {
pp->io_size = resource_size(win->res);
pp->io_bus_addr = win->res->start - win->offset;
pp->io_base = pci_pio_to_address(win->res->start);
}
- /* Set default bus ops */
- bridge->ops = &dw_pcie_ops;
- bridge->child_ops = &dw_child_pcie_ops;
+ /*
+ * visconti_pcie_cpu_addr_fixup() uses pp->io_base, so we have to
+ * call dw_pcie_parent_bus_offset() after setting pp->io_base.
+ */
+ pci->parent_bus_offset = dw_pcie_parent_bus_offset(pci, "config",
+ pp->cfg0_base);
+ return 0;
+}
+
+int dw_pcie_host_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct device *dev = pci->dev;
+ struct device_node *np = dev->of_node;
+ struct pci_host_bridge *bridge;
+ int ret;
+
+ raw_spin_lock_init(&pp->lock);
+
+ bridge = devm_pci_alloc_host_bridge(dev, 0);
+ if (!bridge)
+ return -ENOMEM;
+
+ pp->bridge = bridge;
- if (pp->ops->host_init) {
- ret = pp->ops->host_init(pp);
+ ret = dw_pcie_host_get_resources(pp);
+ if (ret)
+ return ret;
+
+ if (pp->ops->init) {
+ ret = pp->ops->init(pp);
if (ret)
- return ret;
+ goto err_free_ecam;
}
if (pci_msi_enabled()) {
- pp->has_msi_ctrl = !(pp->ops->msi_host_init ||
- of_property_read_bool(np, "msi-parent") ||
- of_property_read_bool(np, "msi-map"));
+ pp->has_msi_ctrl = !(pp->ops->msi_init ||
+ of_property_present(np, "msi-parent") ||
+ of_property_present(np, "msi-map"));
/*
* For the has_msi_ctrl case the default assignment is handled
@@ -452,8 +609,8 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp)
goto err_deinit_host;
}
- if (pp->ops->msi_host_init) {
- ret = pp->ops->msi_host_init(pp);
+ if (pp->ops->msi_init) {
+ ret = pp->ops->msi_init(pp);
if (ret < 0)
goto err_deinit_host;
} else if (pp->has_msi_ctrl) {
@@ -467,37 +624,84 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp)
dw_pcie_iatu_detect(pci);
- ret = dw_pcie_setup_rc(pp);
+ if (pci->num_lanes < 1)
+ pci->num_lanes = dw_pcie_link_get_max_link_width(pci);
+
+ ret = of_pci_get_equalization_presets(dev, &pp->presets, pci->num_lanes);
if (ret)
goto err_free_msi;
+ if (pp->ecam_enabled) {
+ ret = dw_pcie_config_ecam_iatu(pp);
+ if (ret) {
+ dev_err(dev, "Failed to configure iATU in ECAM mode\n");
+ goto err_free_msi;
+ }
+ }
+
+ /*
+ * Allocate the resource for MSG TLP before programming the iATU
+ * outbound window in dw_pcie_setup_rc(). Since the allocation depends
+ * on the value of 'region_align', this has to be done after
+ * dw_pcie_iatu_detect().
+ *
+ * Glue drivers need to set 'use_atu_msg' before dw_pcie_host_init() to
+ * make use of the generic MSG TLP implementation.
+ */
+ if (pp->use_atu_msg)
+ dw_pcie_host_request_msg_tlp_res(pp);
+
+ ret = dw_pcie_edma_detect(pci);
+ if (ret)
+ goto err_free_msi;
+
+ ret = dw_pcie_setup_rc(pp);
+ if (ret)
+ goto err_remove_edma;
+
if (!dw_pcie_link_up(pci)) {
ret = dw_pcie_start_link(pci);
if (ret)
- goto err_free_msi;
+ goto err_remove_edma;
}
- /* Ignore errors, the link may come up later */
- dw_pcie_wait_for_link(pci);
-
- bridge->sysdata = pp;
+ /*
+ * Note: Skip the link up delay only when a Link Up IRQ is present.
+ * If there is no Link Up IRQ, we should not bypass the delay
+ * because that would require users to manually rescan for devices.
+ */
+ if (!pp->use_linkup_irq)
+ /* Ignore errors, the link may come up later */
+ dw_pcie_wait_for_link(pci);
ret = pci_host_probe(bridge);
if (ret)
goto err_stop_link;
+ if (pp->ops->post_init)
+ pp->ops->post_init(pp);
+
+ dwc_pcie_debugfs_init(pci, DW_PCIE_RC_TYPE);
+
return 0;
err_stop_link:
dw_pcie_stop_link(pci);
+err_remove_edma:
+ dw_pcie_edma_remove(pci);
+
err_free_msi:
if (pp->has_msi_ctrl)
dw_pcie_free_msi(pp);
err_deinit_host:
- if (pp->ops->host_deinit)
- pp->ops->host_deinit(pp);
+ if (pp->ops->deinit)
+ pp->ops->deinit(pp);
+
+err_free_ecam:
+ if (pp->cfg)
+ pci_ecam_free(pp->cfg);
return ret;
}
@@ -507,16 +711,23 @@ void dw_pcie_host_deinit(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ dwc_pcie_debugfs_deinit(pci);
+
pci_stop_root_bus(pp->bridge->bus);
pci_remove_root_bus(pp->bridge->bus);
dw_pcie_stop_link(pci);
+ dw_pcie_edma_remove(pci);
+
if (pp->has_msi_ctrl)
dw_pcie_free_msi(pp);
- if (pp->ops->host_deinit)
- pp->ops->host_deinit(pp);
+ if (pp->ops->deinit)
+ pp->ops->deinit(pp);
+
+ if (pp->cfg)
+ pci_ecam_free(pp->cfg);
}
EXPORT_SYMBOL_GPL(dw_pcie_host_deinit);
@@ -525,6 +736,7 @@ static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus,
{
struct dw_pcie_rp *pp = bus->sysdata;
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct dw_pcie_ob_atu_cfg atu = { 0 };
int type, ret;
u32 busdev;
@@ -547,8 +759,12 @@ static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus,
else
type = PCIE_ATU_TYPE_CFG1;
- ret = dw_pcie_prog_outbound_atu(pci, 0, type, pp->cfg0_base, busdev,
- pp->cfg0_size);
+ atu.type = type;
+ atu.parent_bus_addr = pp->cfg0_base - pci->parent_bus_offset;
+ atu.pci_addr = busdev;
+ atu.size = pp->cfg0_size;
+
+ ret = dw_pcie_prog_outbound_atu(pci, &atu);
if (ret)
return NULL;
@@ -560,6 +776,7 @@ static int dw_pcie_rd_other_conf(struct pci_bus *bus, unsigned int devfn,
{
struct dw_pcie_rp *pp = bus->sysdata;
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct dw_pcie_ob_atu_cfg atu = { 0 };
int ret;
ret = pci_generic_config_read(bus, devfn, where, size, val);
@@ -567,9 +784,12 @@ static int dw_pcie_rd_other_conf(struct pci_bus *bus, unsigned int devfn,
return ret;
if (pp->cfg0_io_shared) {
- ret = dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO,
- pp->io_base, pp->io_bus_addr,
- pp->io_size);
+ atu.type = PCIE_ATU_TYPE_IO;
+ atu.parent_bus_addr = pp->io_base - pci->parent_bus_offset;
+ atu.pci_addr = pp->io_bus_addr;
+ atu.size = pp->io_size;
+
+ ret = dw_pcie_prog_outbound_atu(pci, &atu);
if (ret)
return PCIBIOS_SET_FAILED;
}
@@ -582,6 +802,7 @@ static int dw_pcie_wr_other_conf(struct pci_bus *bus, unsigned int devfn,
{
struct dw_pcie_rp *pp = bus->sysdata;
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct dw_pcie_ob_atu_cfg atu = { 0 };
int ret;
ret = pci_generic_config_write(bus, devfn, where, size, val);
@@ -589,9 +810,12 @@ static int dw_pcie_wr_other_conf(struct pci_bus *bus, unsigned int devfn,
return ret;
if (pp->cfg0_io_shared) {
- ret = dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO,
- pp->io_base, pp->io_bus_addr,
- pp->io_size);
+ atu.type = PCIE_ATU_TYPE_IO;
+ atu.parent_bus_addr = pp->io_base - pci->parent_bus_offset;
+ atu.pci_addr = pp->io_bus_addr;
+ atu.size = pp->io_size;
+
+ ret = dw_pcie_prog_outbound_atu(pci, &atu);
if (ret)
return PCIBIOS_SET_FAILED;
}
@@ -617,15 +841,47 @@ void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus, unsigned int devfn,
}
EXPORT_SYMBOL_GPL(dw_pcie_own_conf_map_bus);
+static void __iomem *dw_pcie_ecam_conf_map_bus(struct pci_bus *bus, unsigned int devfn, int where)
+{
+ struct pci_config_window *cfg = bus->sysdata;
+ struct dw_pcie_rp *pp = cfg->priv;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ unsigned int busn = bus->number;
+
+ if (busn > 0)
+ return pci_ecam_map_bus(bus, devfn, where);
+
+ if (PCI_SLOT(devfn) > 0)
+ return NULL;
+
+ return pci->dbi_base + where;
+}
+
+static int dw_pcie_op_assert_perst(struct pci_bus *bus, bool assert)
+{
+ struct dw_pcie_rp *pp = bus->sysdata;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+
+ return dw_pcie_assert_perst(pci, assert);
+}
+
static struct pci_ops dw_pcie_ops = {
.map_bus = dw_pcie_own_conf_map_bus,
.read = pci_generic_config_read,
.write = pci_generic_config_write,
+ .assert_perst = dw_pcie_op_assert_perst,
+};
+
+static struct pci_ops dw_pcie_ecam_ops = {
+ .map_bus = dw_pcie_ecam_conf_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
};
static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct dw_pcie_ob_atu_cfg atu = { 0 };
struct resource_entry *entry;
int i, ret;
@@ -653,10 +909,19 @@ static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)
if (pci->num_ob_windows <= ++i)
break;
- ret = dw_pcie_prog_outbound_atu(pci, i, PCIE_ATU_TYPE_MEM,
- entry->res->start,
- entry->res->start - entry->offset,
- resource_size(entry->res));
+ atu.index = i;
+ atu.type = PCIE_ATU_TYPE_MEM;
+ atu.parent_bus_addr = entry->res->start - pci->parent_bus_offset;
+ atu.pci_addr = entry->res->start - entry->offset;
+
+ /* Adjust iATU size if MSG TLP region was allocated before */
+ if (pp->msg_res && pp->msg_res->parent == entry->res)
+ atu.size = resource_size(entry->res) -
+ resource_size(pp->msg_res);
+ else
+ atu.size = resource_size(entry->res);
+
+ ret = dw_pcie_prog_outbound_atu(pci, &atu);
if (ret) {
dev_err(pci->dev, "Failed to set MEM range %pr\n",
entry->res);
@@ -666,10 +931,13 @@ static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)
if (pp->io_size) {
if (pci->num_ob_windows > ++i) {
- ret = dw_pcie_prog_outbound_atu(pci, i, PCIE_ATU_TYPE_IO,
- pp->io_base,
- pp->io_bus_addr,
- pp->io_size);
+ atu.index = i;
+ atu.type = PCIE_ATU_TYPE_IO;
+ atu.parent_bus_addr = pp->io_base - pci->parent_bus_offset;
+ atu.pci_addr = pp->io_bus_addr;
+ atu.size = pp->io_size;
+
+ ret = dw_pcie_prog_outbound_atu(pci, &atu);
if (ret) {
dev_err(pci->dev, "Failed to set IO range %pr\n",
entry->res);
@@ -684,6 +952,8 @@ static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)
dev_warn(pci->dev, "Ranges exceed outbound iATU size (%d)\n",
pci->num_ob_windows);
+ pp->msg_atu_index = i;
+
i = 0;
resource_list_for_each_entry(entry, &pp->bridge->dma_ranges) {
if (resource_type(entry->res) != IORESOURCE_MEM)
@@ -710,10 +980,81 @@ static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)
return 0;
}
+static void dw_pcie_program_presets(struct dw_pcie_rp *pp, enum pci_bus_speed speed)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ u8 lane_eq_offset, lane_reg_size, cap_id;
+ u8 *presets;
+ u32 cap;
+ int i;
+
+ if (speed == PCIE_SPEED_8_0GT) {
+ presets = (u8 *)pp->presets.eq_presets_8gts;
+ lane_eq_offset = PCI_SECPCI_LE_CTRL;
+ cap_id = PCI_EXT_CAP_ID_SECPCI;
+		/* At 8 GT/s, each lane equalization control register is 16 bits wide */
+ lane_reg_size = 0x2;
+ } else if (speed == PCIE_SPEED_16_0GT) {
+ presets = pp->presets.eq_presets_Ngts[EQ_PRESET_TYPE_16GTS - 1];
+ lane_eq_offset = PCI_PL_16GT_LE_CTRL;
+ cap_id = PCI_EXT_CAP_ID_PL_16GT;
+ lane_reg_size = 0x1;
+ } else if (speed == PCIE_SPEED_32_0GT) {
+ presets = pp->presets.eq_presets_Ngts[EQ_PRESET_TYPE_32GTS - 1];
+ lane_eq_offset = PCI_PL_32GT_LE_CTRL;
+ cap_id = PCI_EXT_CAP_ID_PL_32GT;
+ lane_reg_size = 0x1;
+ } else if (speed == PCIE_SPEED_64_0GT) {
+ presets = pp->presets.eq_presets_Ngts[EQ_PRESET_TYPE_64GTS - 1];
+ lane_eq_offset = PCI_PL_64GT_LE_CTRL;
+ cap_id = PCI_EXT_CAP_ID_PL_64GT;
+ lane_reg_size = 0x1;
+ } else {
+ return;
+ }
+
+ if (presets[0] == PCI_EQ_RESV)
+ return;
+
+ cap = dw_pcie_find_ext_capability(pci, cap_id);
+ if (!cap)
+ return;
+
+ /*
+ * Write preset values to the registers byte-by-byte for the given
+ * number of lanes and register size.
+ */
+ for (i = 0; i < pci->num_lanes * lane_reg_size; i++)
+ dw_pcie_writeb_dbi(pci, cap + lane_eq_offset + i, presets[i]);
+}
+
+static void dw_pcie_config_presets(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ enum pci_bus_speed speed = pcie_link_speed[pci->max_link_speed];
+
+ /*
+ * Lane equalization settings need to be applied for all data rates the
+ * controller supports and for all supported lanes.
+ */
+
+ if (speed >= PCIE_SPEED_8_0GT)
+ dw_pcie_program_presets(pp, PCIE_SPEED_8_0GT);
+
+ if (speed >= PCIE_SPEED_16_0GT)
+ dw_pcie_program_presets(pp, PCIE_SPEED_16_0GT);
+
+ if (speed >= PCIE_SPEED_32_0GT)
+ dw_pcie_program_presets(pp, PCIE_SPEED_32_0GT);
+
+ if (speed >= PCIE_SPEED_64_0GT)
+ dw_pcie_program_presets(pp, PCIE_SPEED_64_0GT);
+}
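[Editor's note: a worked example of the write loop above for an x4 link. At 8 GT/s each lane equalization control register is 16 bits wide, while at 16 GT/s and above it is a single byte, so per data rate the loop issues

	writes = pci->num_lanes * lane_reg_size;	/* 4 * 2 = 8 at 8 GT/s, 4 * 1 = 4 at 16+ GT/s */

byte-sized DBI writes starting at the capability's lane equalization control offset.]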
+
int dw_pcie_setup_rc(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- u32 val, ctrl, num_ctrls;
+ u32 val;
int ret;
/*
@@ -724,20 +1065,6 @@ int dw_pcie_setup_rc(struct dw_pcie_rp *pp)
dw_pcie_setup(pci);
- if (pp->has_msi_ctrl) {
- num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
-
- /* Initialize IRQ Status array */
- for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
- dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK +
- (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
- pp->irq_mask[ctrl]);
- dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_ENABLE +
- (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
- ~0);
- }
- }
-
dw_pcie_msi_init(pp);
/* Setup RC BARs */
@@ -763,6 +1090,9 @@ int dw_pcie_setup_rc(struct dw_pcie_rp *pp)
PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
dw_pcie_writel_dbi(pci, PCI_COMMAND, val);
+ dw_pcie_hide_unsupported_l1ss(pci);
+
+ dw_pcie_config_presets(pp);
/*
* If the platform provides its own child bus config accesses, it means
* the platform uses its own address translation component rather than
@@ -788,3 +1118,119 @@ int dw_pcie_setup_rc(struct dw_pcie_rp *pp)
return 0;
}
EXPORT_SYMBOL_GPL(dw_pcie_setup_rc);
+
+static int dw_pcie_pme_turn_off(struct dw_pcie *pci)
+{
+ struct dw_pcie_ob_atu_cfg atu = { 0 };
+ void __iomem *mem;
+ int ret;
+
+ if (pci->num_ob_windows <= pci->pp.msg_atu_index)
+ return -ENOSPC;
+
+ if (!pci->pp.msg_res)
+ return -ENOSPC;
+
+ atu.code = PCIE_MSG_CODE_PME_TURN_OFF;
+ atu.routing = PCIE_MSG_TYPE_R_BC;
+ atu.type = PCIE_ATU_TYPE_MSG;
+ atu.size = resource_size(pci->pp.msg_res);
+ atu.index = pci->pp.msg_atu_index;
+
+ atu.parent_bus_addr = pci->pp.msg_res->start - pci->parent_bus_offset;
+
+ ret = dw_pcie_prog_outbound_atu(pci, &atu);
+ if (ret)
+ return ret;
+
+ mem = ioremap(pci->pp.msg_res->start, pci->region_align);
+ if (!mem)
+ return -ENOMEM;
+
+ /* A dummy write is converted to a Msg TLP */
+ writel(0, mem);
+
+ iounmap(mem);
+
+ return 0;
+}
+
+int dw_pcie_suspend_noirq(struct dw_pcie *pci)
+{
+ u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ u32 val;
+ int ret;
+
+ /*
+ * If L1SS is supported, then do not put the link into L2 as some
+ * devices such as NVMe expect low resume latency.
+ */
+ if (dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKCTL) & PCI_EXP_LNKCTL_ASPM_L1)
+ return 0;
+
+ if (pci->pp.ops->pme_turn_off) {
+ pci->pp.ops->pme_turn_off(&pci->pp);
+ } else {
+ ret = dw_pcie_pme_turn_off(pci);
+ if (ret)
+ return ret;
+ }
+
+ ret = read_poll_timeout(dw_pcie_get_ltssm, val,
+ val == DW_PCIE_LTSSM_L2_IDLE ||
+ val <= DW_PCIE_LTSSM_DETECT_WAIT,
+				PCIE_PME_TO_L2_TIMEOUT_US / 10,
+ PCIE_PME_TO_L2_TIMEOUT_US, false, pci);
+ if (ret) {
+ /* Only log message when LTSSM isn't in DETECT or POLL */
+ dev_err(pci->dev, "Timeout waiting for L2 entry! LTSSM: 0x%x\n", val);
+ return ret;
+ }
+
+ /*
+	 * Per PCIe r6.0, sec 5.3.3.2.1, software should wait at least
+	 * 100 ns after L2/L3 Ready before turning off the reference clock
+	 * and main power. This is harmless when no endpoint is connected.
+ */
+ udelay(1);
+
+ dw_pcie_stop_link(pci);
+ if (pci->pp.ops->deinit)
+ pci->pp.ops->deinit(&pci->pp);
+
+ pci->suspended = true;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dw_pcie_suspend_noirq);
+
+int dw_pcie_resume_noirq(struct dw_pcie *pci)
+{
+ int ret;
+
+ if (!pci->suspended)
+ return 0;
+
+ pci->suspended = false;
+
+ if (pci->pp.ops->init) {
+ ret = pci->pp.ops->init(&pci->pp);
+ if (ret) {
+ dev_err(pci->dev, "Host init failed: %d\n", ret);
+ return ret;
+ }
+ }
+
+ dw_pcie_setup_rc(&pci->pp);
+
+ ret = dw_pcie_start_link(pci);
+ if (ret)
+ return ret;
+
+ ret = dw_pcie_wait_for_link(pci);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dw_pcie_resume_noirq);
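[Editor's note: a minimal sketch of how a glue driver would wire these helpers into its PM callbacks. my_pcie and my_pcie_pm_ops are hypothetical names; the glue is assumed to keep a struct dw_pcie in its driver data:

	static int my_pcie_suspend_noirq(struct device *dev)
	{
		struct my_pcie *p = dev_get_drvdata(dev);	/* hypothetical glue state */

		return dw_pcie_suspend_noirq(&p->pci);
	}

	static int my_pcie_resume_noirq(struct device *dev)
	{
		struct my_pcie *p = dev_get_drvdata(dev);

		return dw_pcie_resume_noirq(&p->pci);
	}

	static const struct dev_pm_ops my_pcie_pm_ops = {
		NOIRQ_SYSTEM_SLEEP_PM_OPS(my_pcie_suspend_noirq,
					  my_pcie_resume_noirq)
	};
]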
diff --git a/drivers/pci/controller/dwc/pcie-designware-plat.c b/drivers/pci/controller/dwc/pcie-designware-plat.c
index 1fcfb840f238..12f41886c65d 100644
--- a/drivers/pci/controller/dwc/pcie-designware-plat.c
+++ b/drivers/pci/controller/dwc/pcie-designware-plat.c
@@ -12,7 +12,7 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/resource.h>
@@ -42,17 +42,16 @@ static void dw_plat_pcie_ep_init(struct dw_pcie_ep *ep)
}
static int dw_plat_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
- enum pci_epc_irq_type type,
- u16 interrupt_num)
+ unsigned int type, u16 interrupt_num)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
switch (type) {
- case PCI_EPC_IRQ_LEGACY:
- return dw_pcie_ep_raise_legacy_irq(ep, func_no);
- case PCI_EPC_IRQ_MSI:
+ case PCI_IRQ_INTX:
+ return dw_pcie_ep_raise_intx_irq(ep, func_no);
+ case PCI_IRQ_MSI:
return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
- case PCI_EPC_IRQ_MSIX:
+ case PCI_IRQ_MSIX:
return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
default:
dev_err(pci->dev, "UNKNOWN IRQ type\n");
@@ -62,7 +61,6 @@ static int dw_plat_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
}
static const struct pci_epc_features dw_plat_pcie_epc_features = {
- .linkup_notifier = false,
.msi_capable = true,
.msix_capable = true,
};
@@ -74,7 +72,7 @@ dw_plat_pcie_get_features(struct dw_pcie_ep *ep)
}
static const struct dw_pcie_ep_ops pcie_ep_ops = {
- .ep_init = dw_plat_pcie_ep_init,
+ .init = dw_plat_pcie_ep_init,
.raise_irq = dw_plat_pcie_ep_raise_irq,
.get_features = dw_plat_pcie_get_features,
};
@@ -146,6 +144,17 @@ static int dw_plat_pcie_probe(struct platform_device *pdev)
pci->ep.ops = &pcie_ep_ops;
ret = dw_pcie_ep_init(&pci->ep);
+ if (ret)
+ return ret;
+
+ ret = dw_pcie_ep_init_registers(&pci->ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DWC endpoint registers\n");
+		dw_pcie_ep_deinit(&pci->ep);
+		return ret;
+	}
+
+ pci_epc_init_notify(pci->ep.epc);
+
break;
default:
dev_err(dev, "INVALID device type %d\n", dw_plat_pcie->mode);
diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
index 6d5d619ab2e9..75fc8b767fcc 100644
--- a/drivers/pci/controller/dwc/pcie-designware.c
+++ b/drivers/pci/controller/dwc/pcie-designware.c
@@ -12,10 +12,13 @@
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/dma/edma.h>
#include <linux/gpio/consumer.h>
#include <linux/ioport.h>
#include <linux/of.h>
-#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/pcie-dwc.h>
+#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/types.h>
@@ -51,6 +54,14 @@ static const char * const dw_pcie_core_rsts[DW_PCIE_NUM_CORE_RSTS] = {
[DW_PCIE_PWR_RST] = "pwr",
};
+static const struct dwc_pcie_vsec_id dwc_pcie_ptm_vsec_ids[] = {
+ { .vendor_id = PCI_VENDOR_ID_QCOM, /* EP */
+ .vsec_id = 0x03, .vsec_rev = 0x1 },
+ { .vendor_id = PCI_VENDOR_ID_QCOM, /* RC */
+ .vsec_id = 0x04, .vsec_rev = 0x1 },
+ { }
+};
+
static int dw_pcie_get_clocks(struct dw_pcie *pci)
{
int i, ret;
@@ -111,6 +122,7 @@ int dw_pcie_get_resources(struct dw_pcie *pci)
pci->dbi_base = devm_pci_remap_cfg_resource(pci->dev, res);
if (IS_ERR(pci->dbi_base))
return PTR_ERR(pci->dbi_base);
+ pci->dbi_phys_addr = res->start;
}
/* DBI2 is mainly useful for the endpoint controller */
@@ -133,6 +145,7 @@ int dw_pcie_get_resources(struct dw_pcie *pci)
pci->atu_base = devm_ioremap_resource(pci->dev, res);
if (IS_ERR(pci->atu_base))
return PTR_ERR(pci->atu_base);
+ pci->atu_phys_addr = res->start;
} else {
pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
}
@@ -142,6 +155,28 @@ int dw_pcie_get_resources(struct dw_pcie *pci)
if (!pci->atu_size)
pci->atu_size = SZ_4K;
+ /* eDMA region can be mapped to a custom base address */
+ if (!pci->edma.reg_base) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dma");
+ if (res) {
+ pci->edma.reg_base = devm_ioremap_resource(pci->dev, res);
+ if (IS_ERR(pci->edma.reg_base))
+ return PTR_ERR(pci->edma.reg_base);
+ } else if (pci->atu_size >= 2 * DEFAULT_DBI_DMA_OFFSET) {
+ pci->edma.reg_base = pci->atu_base + DEFAULT_DBI_DMA_OFFSET;
+ }
+ }
+
+ /* ELBI is an optional resource */
+ if (!pci->elbi_base) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi");
+ if (res) {
+ pci->elbi_base = devm_ioremap_resource(pci->dev, res);
+ if (IS_ERR(pci->elbi_base))
+ return PTR_ERR(pci->elbi_base);
+ }
+ }
+
	/* The LLDD is supposed to manually switch the clock and reset states */
if (dw_pcie_cap_is(pci, REQ_RES)) {
ret = dw_pcie_get_clocks(pci);
@@ -153,8 +188,8 @@ int dw_pcie_get_resources(struct dw_pcie *pci)
return ret;
}
- if (pci->link_gen < 1)
- pci->link_gen = of_pci_get_max_link_speed(np);
+ if (pci->max_link_speed < 1)
+ pci->max_link_speed = of_pci_get_max_link_speed(np);
of_property_read_u32(np, "num-lanes", &pci->num_lanes);
@@ -188,85 +223,69 @@ void dw_pcie_version_detect(struct dw_pcie *pci)
pci->type = ver;
}
-/*
- * These interfaces resemble the pci_find_*capability() interfaces, but these
- * are for configuring host controllers, which are bridges *to* PCI devices but
- * are not PCI devices themselves.
- */
-static u8 __dw_pcie_find_next_cap(struct dw_pcie *pci, u8 cap_ptr,
- u8 cap)
+u8 dw_pcie_find_capability(struct dw_pcie *pci, u8 cap)
{
- u8 cap_id, next_cap_ptr;
- u16 reg;
-
- if (!cap_ptr)
- return 0;
-
- reg = dw_pcie_readw_dbi(pci, cap_ptr);
- cap_id = (reg & 0x00ff);
-
- if (cap_id > PCI_CAP_ID_MAX)
- return 0;
-
- if (cap_id == cap)
- return cap_ptr;
-
- next_cap_ptr = (reg & 0xff00) >> 8;
- return __dw_pcie_find_next_cap(pci, next_cap_ptr, cap);
+ return PCI_FIND_NEXT_CAP(dw_pcie_read_cfg, PCI_CAPABILITY_LIST, cap,
+ pci);
}
+EXPORT_SYMBOL_GPL(dw_pcie_find_capability);
-u8 dw_pcie_find_capability(struct dw_pcie *pci, u8 cap)
+u16 dw_pcie_find_ext_capability(struct dw_pcie *pci, u8 cap)
{
- u8 next_cap_ptr;
- u16 reg;
-
- reg = dw_pcie_readw_dbi(pci, PCI_CAPABILITY_LIST);
- next_cap_ptr = (reg & 0x00ff);
-
- return __dw_pcie_find_next_cap(pci, next_cap_ptr, cap);
+ return PCI_FIND_NEXT_EXT_CAP(dw_pcie_read_cfg, 0, cap, pci);
}
-EXPORT_SYMBOL_GPL(dw_pcie_find_capability);
+EXPORT_SYMBOL_GPL(dw_pcie_find_ext_capability);
-static u16 dw_pcie_find_next_ext_capability(struct dw_pcie *pci, u16 start,
- u8 cap)
+static u16 __dw_pcie_find_vsec_capability(struct dw_pcie *pci, u16 vendor_id,
+ u16 vsec_id)
{
+ u16 vsec = 0;
u32 header;
- int ttl;
- int pos = PCI_CFG_SPACE_SIZE;
-
- /* minimum 8 bytes per capability */
- ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
-
- if (start)
- pos = start;
- header = dw_pcie_readl_dbi(pci, pos);
- /*
- * If we have no capabilities, this is indicated by cap ID,
- * cap version and next pointer all being 0.
- */
- if (header == 0)
+ if (vendor_id != dw_pcie_readw_dbi(pci, PCI_VENDOR_ID))
return 0;
- while (ttl-- > 0) {
- if (PCI_EXT_CAP_ID(header) == cap && pos != start)
- return pos;
+ while ((vsec = PCI_FIND_NEXT_EXT_CAP(dw_pcie_read_cfg, vsec,
+ PCI_EXT_CAP_ID_VNDR, pci))) {
+ header = dw_pcie_readl_dbi(pci, vsec + PCI_VNDR_HEADER);
+ if (PCI_VNDR_HEADER_ID(header) == vsec_id)
+ return vsec;
+ }
- pos = PCI_EXT_CAP_NEXT(header);
- if (pos < PCI_CFG_SPACE_SIZE)
- break;
+ return 0;
+}
+
+static u16 dw_pcie_find_vsec_capability(struct dw_pcie *pci,
+ const struct dwc_pcie_vsec_id *vsec_ids)
+{
+ const struct dwc_pcie_vsec_id *vid;
+ u16 vsec;
+ u32 header;
- header = dw_pcie_readl_dbi(pci, pos);
+ for (vid = vsec_ids; vid->vendor_id; vid++) {
+ vsec = __dw_pcie_find_vsec_capability(pci, vid->vendor_id,
+ vid->vsec_id);
+ if (vsec) {
+ header = dw_pcie_readl_dbi(pci, vsec + PCI_VNDR_HEADER);
+ if (PCI_VNDR_HEADER_REV(header) == vid->vsec_rev)
+ return vsec;
+ }
}
return 0;
}
-u16 dw_pcie_find_ext_capability(struct dw_pcie *pci, u8 cap)
+u16 dw_pcie_find_rasdes_capability(struct dw_pcie *pci)
{
- return dw_pcie_find_next_ext_capability(pci, 0, cap);
+ return dw_pcie_find_vsec_capability(pci, dwc_pcie_rasdes_vsec_ids);
}
-EXPORT_SYMBOL_GPL(dw_pcie_find_ext_capability);
+EXPORT_SYMBOL_GPL(dw_pcie_find_rasdes_capability);
+
+u16 dw_pcie_find_ptm_capability(struct dw_pcie *pci)
+{
+ return dw_pcie_find_vsec_capability(pci, dwc_pcie_ptm_vsec_ids);
+}
+EXPORT_SYMBOL_GPL(dw_pcie_find_ptm_capability);
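[Editor's note: a usage sketch for the VSEC lookups, where SOME_PTM_REG is a placeholder for a vendor-defined register offset inside the PTM VSEC:

	u16 ptm = dw_pcie_find_ptm_capability(pci);

	if (ptm)
		val = dw_pcie_readl_dbi(pci, ptm + SOME_PTM_REG);
]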
int dw_pcie_read(void __iomem *addr, int size, u32 *val)
{
@@ -352,6 +371,7 @@ void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
if (ret)
dev_err(pci->dev, "write DBI address failed\n");
}
+EXPORT_SYMBOL_GPL(dw_pcie_write_dbi2);
static inline void __iomem *dw_pcie_select_atu(struct dw_pcie *pci, u32 dir,
u32 index)
@@ -451,56 +471,58 @@ static inline u32 dw_pcie_enable_ecrc(u32 val)
return val | PCIE_ATU_TD;
}
-static int __dw_pcie_prog_outbound_atu(struct dw_pcie *pci, u8 func_no,
- int index, int type, u64 cpu_addr,
- u64 pci_addr, u64 size)
+int dw_pcie_prog_outbound_atu(struct dw_pcie *pci,
+ const struct dw_pcie_ob_atu_cfg *atu)
{
+ u64 parent_bus_addr = atu->parent_bus_addr;
u32 retries, val;
u64 limit_addr;
- if (pci->ops && pci->ops->cpu_addr_fixup)
- cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr);
-
- limit_addr = cpu_addr + size - 1;
+ limit_addr = parent_bus_addr + atu->size - 1;
- if ((limit_addr & ~pci->region_limit) != (cpu_addr & ~pci->region_limit) ||
- !IS_ALIGNED(cpu_addr, pci->region_align) ||
- !IS_ALIGNED(pci_addr, pci->region_align) || !size) {
+ if ((limit_addr & ~pci->region_limit) != (parent_bus_addr & ~pci->region_limit) ||
+ !IS_ALIGNED(parent_bus_addr, pci->region_align) ||
+ !IS_ALIGNED(atu->pci_addr, pci->region_align) || !atu->size) {
return -EINVAL;
}
- dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_LOWER_BASE,
- lower_32_bits(cpu_addr));
- dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_UPPER_BASE,
- upper_32_bits(cpu_addr));
+ dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_LOWER_BASE,
+ lower_32_bits(parent_bus_addr));
+ dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_UPPER_BASE,
+ upper_32_bits(parent_bus_addr));
- dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_LIMIT,
+ dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_LIMIT,
lower_32_bits(limit_addr));
if (dw_pcie_ver_is_ge(pci, 460A))
- dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_UPPER_LIMIT,
+ dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_UPPER_LIMIT,
upper_32_bits(limit_addr));
- dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_LOWER_TARGET,
- lower_32_bits(pci_addr));
- dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_UPPER_TARGET,
- upper_32_bits(pci_addr));
+ dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_LOWER_TARGET,
+ lower_32_bits(atu->pci_addr));
+ dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_UPPER_TARGET,
+ upper_32_bits(atu->pci_addr));
- val = type | PCIE_ATU_FUNC_NUM(func_no);
- if (upper_32_bits(limit_addr) > upper_32_bits(cpu_addr) &&
+ val = atu->type | atu->routing | PCIE_ATU_FUNC_NUM(atu->func_no);
+ if (upper_32_bits(limit_addr) > upper_32_bits(parent_bus_addr) &&
dw_pcie_ver_is_ge(pci, 460A))
val |= PCIE_ATU_INCREASE_REGION_SIZE;
if (dw_pcie_ver_is(pci, 490A))
val = dw_pcie_enable_ecrc(val);
- dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_REGION_CTRL1, val);
+ dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_REGION_CTRL1, val);
- dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_REGION_CTRL2, PCIE_ATU_ENABLE);
+ val = PCIE_ATU_ENABLE | atu->ctrl2;
+ if (atu->type == PCIE_ATU_TYPE_MSG) {
+		/* Only data-less messages are supported for now */
+ val |= PCIE_ATU_INHIBIT_PAYLOAD | atu->code;
+ }
+ dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_REGION_CTRL2, val);
/*
* Make sure ATU enable takes effect before any subsequent config
* and I/O accesses.
*/
for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
- val = dw_pcie_readl_atu_ob(pci, index, PCIE_ATU_REGION_CTRL2);
+ val = dw_pcie_readl_atu_ob(pci, atu->index, PCIE_ATU_REGION_CTRL2);
if (val & PCIE_ATU_ENABLE)
return 0;
@@ -512,21 +534,6 @@ static int __dw_pcie_prog_outbound_atu(struct dw_pcie *pci, u8 func_no,
return -ETIMEDOUT;
}
-int dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
- u64 cpu_addr, u64 pci_addr, u64 size)
-{
- return __dw_pcie_prog_outbound_atu(pci, 0, index, type,
- cpu_addr, pci_addr, size);
-}
-
-int dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, u8 func_no, int index,
- int type, u64 cpu_addr, u64 pci_addr,
- u64 size)
-{
- return __dw_pcie_prog_outbound_atu(pci, func_no, index, type,
- cpu_addr, pci_addr, size);
-}
-
static inline u32 dw_pcie_readl_atu_ib(struct dw_pcie *pci, u32 index, u32 reg)
{
return dw_pcie_readl_atu(pci, PCIE_ATU_REGION_DIR_IB, index, reg);
@@ -539,13 +546,13 @@ static inline void dw_pcie_writel_atu_ib(struct dw_pcie *pci, u32 index, u32 reg
}
int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int type,
- u64 cpu_addr, u64 pci_addr, u64 size)
+ u64 parent_bus_addr, u64 pci_addr, u64 size)
{
u64 limit_addr = pci_addr + size - 1;
u32 retries, val;
if ((limit_addr & ~pci->region_limit) != (pci_addr & ~pci->region_limit) ||
- !IS_ALIGNED(cpu_addr, pci->region_align) ||
+ !IS_ALIGNED(parent_bus_addr, pci->region_align) ||
!IS_ALIGNED(pci_addr, pci->region_align) || !size) {
return -EINVAL;
}
@@ -562,9 +569,9 @@ int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int type,
upper_32_bits(limit_addr));
dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_LOWER_TARGET,
- lower_32_bits(cpu_addr));
+ lower_32_bits(parent_bus_addr));
dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_UPPER_TARGET,
- upper_32_bits(cpu_addr));
+ upper_32_bits(parent_bus_addr));
val = type;
if (upper_32_bits(limit_addr) > upper_32_bits(pci_addr) &&
@@ -591,17 +598,18 @@ int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int type,
}
int dw_pcie_prog_ep_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
- int type, u64 cpu_addr, u8 bar)
+ int type, u64 parent_bus_addr, u8 bar, size_t size)
{
u32 retries, val;
- if (!IS_ALIGNED(cpu_addr, pci->region_align))
+ if (!IS_ALIGNED(parent_bus_addr, pci->region_align) ||
+ !IS_ALIGNED(parent_bus_addr, size))
return -EINVAL;
dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_LOWER_TARGET,
- lower_32_bits(cpu_addr));
+ lower_32_bits(parent_bus_addr));
dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_UPPER_TARGET,
- upper_32_bits(cpu_addr));
+ upper_32_bits(parent_bus_addr));
dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_REGION_CTRL1, type |
PCIE_ATU_FUNC_NUM(func_no));
@@ -637,18 +645,26 @@ int dw_pcie_wait_for_link(struct dw_pcie *pci)
int retries;
/* Check if the link is up or not */
- for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
+ for (retries = 0; retries < PCIE_LINK_WAIT_MAX_RETRIES; retries++) {
if (dw_pcie_link_up(pci))
break;
- usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
+ msleep(PCIE_LINK_WAIT_SLEEP_MS);
}
- if (retries >= LINK_WAIT_MAX_RETRIES) {
+ if (retries >= PCIE_LINK_WAIT_MAX_RETRIES) {
dev_info(pci->dev, "Phy link never came up\n");
return -ETIMEDOUT;
}
+ /*
+	 * As per PCIe r6.0, sec 6.6.1, for a Downstream Port that supports
+	 * Link speeds greater than 5.0 GT/s, software must wait a minimum of
+	 * 100 ms
+ * after Link training completes before sending a Configuration Request.
+ */
+ if (pci->max_link_speed > 2)
+ msleep(PCIE_RESET_CONFIG_WAIT_MS);
+
offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
val = dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKSTA);
@@ -660,7 +676,7 @@ int dw_pcie_wait_for_link(struct dw_pcie *pci)
}
EXPORT_SYMBOL_GPL(dw_pcie_wait_for_link);
-int dw_pcie_link_up(struct dw_pcie *pci)
+bool dw_pcie_link_up(struct dw_pcie *pci)
{
u32 val;
@@ -683,16 +699,27 @@ void dw_pcie_upconfig_setup(struct dw_pcie *pci)
}
EXPORT_SYMBOL_GPL(dw_pcie_upconfig_setup);
-static void dw_pcie_link_set_max_speed(struct dw_pcie *pci, u32 link_gen)
+static void dw_pcie_link_set_max_speed(struct dw_pcie *pci)
{
u32 cap, ctrl2, link_speed;
u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
cap = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
+
+ /*
+	 * Even if the platform doesn't want to limit the maximum link speed,
+	 * cache the hardware default value so that vendor drivers can use it
+	 * for any link-specific configuration.
+ */
+ if (pci->max_link_speed < 1) {
+ pci->max_link_speed = FIELD_GET(PCI_EXP_LNKCAP_SLS, cap);
+ return;
+ }
+
ctrl2 = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCTL2);
ctrl2 &= ~PCI_EXP_LNKCTL2_TLS;
- switch (pcie_link_speed[link_gen]) {
+ switch (pcie_link_speed[pci->max_link_speed]) {
case PCIE_SPEED_2_5GT:
link_speed = PCI_EXP_LNKCTL2_TLS_2_5GT;
break;
@@ -719,6 +746,61 @@ static void dw_pcie_link_set_max_speed(struct dw_pcie *pci, u32 link_gen)
}
+int dw_pcie_link_get_max_link_width(struct dw_pcie *pci)
+{
+ u8 cap = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ u32 lnkcap = dw_pcie_readl_dbi(pci, cap + PCI_EXP_LNKCAP);
+
+ return FIELD_GET(PCI_EXP_LNKCAP_MLW, lnkcap);
+}
+
+static void dw_pcie_link_set_max_link_width(struct dw_pcie *pci, u32 num_lanes)
+{
+ u32 lnkcap, lwsc, plc;
+ u8 cap;
+
+ if (!num_lanes)
+ return;
+
+ /* Set the number of lanes */
+ plc = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
+ plc &= ~PORT_LINK_FAST_LINK_MODE;
+ plc &= ~PORT_LINK_MODE_MASK;
+
+ /* Set link width speed control register */
+ lwsc = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
+ lwsc &= ~PORT_LOGIC_LINK_WIDTH_MASK;
+ lwsc |= PORT_LOGIC_LINK_WIDTH_1_LANES;
+ switch (num_lanes) {
+ case 1:
+ plc |= PORT_LINK_MODE_1_LANES;
+ break;
+ case 2:
+ plc |= PORT_LINK_MODE_2_LANES;
+ break;
+ case 4:
+ plc |= PORT_LINK_MODE_4_LANES;
+ break;
+ case 8:
+ plc |= PORT_LINK_MODE_8_LANES;
+ break;
+ case 16:
+ plc |= PORT_LINK_MODE_16_LANES;
+ break;
+ default:
+ dev_err(pci->dev, "num-lanes %u: invalid value\n", num_lanes);
+ return;
+ }
+ dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, plc);
+ dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, lwsc);
+
+ cap = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ lnkcap = dw_pcie_readl_dbi(pci, cap + PCI_EXP_LNKCAP);
+ lnkcap &= ~PCI_EXP_LNKCAP_MLW;
+ lnkcap |= FIELD_PREP(PCI_EXP_LNKCAP_MLW, num_lanes);
+ dw_pcie_writel_dbi(pci, cap + PCI_EXP_LNKCAP, lnkcap);
+}
+
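
A sketch of where these knobs come from in a typical glue driver, using the generic devicetree bindings; foo_pcie_parse_dt is hypothetical:

static void foo_pcie_parse_dt(struct dw_pcie *pci)
{
	struct device_node *np = pci->dev->of_node;
	u32 lanes;

	/* Feeds dw_pcie_link_set_max_link_width() via dw_pcie_setup() */
	if (!of_property_read_u32(np, "num-lanes", &lanes))
		pci->num_lanes = lanes;

	/*
	 * Non-positive means "no limit": the core then caches the
	 * hardware default into pci->max_link_speed instead.
	 */
	pci->max_link_speed = of_pci_get_max_link_speed(np);
}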
void dw_pcie_iatu_detect(struct dw_pcie *pci)
{
int max_region, ob, ib;
@@ -782,12 +864,254 @@ void dw_pcie_iatu_detect(struct dw_pcie *pci)
pci->region_align / SZ_1K, (pci->region_limit + 1) / SZ_1G);
}
+static u32 dw_pcie_readl_dma(struct dw_pcie *pci, u32 reg)
+{
+ u32 val = 0;
+ int ret;
+
+ if (pci->ops && pci->ops->read_dbi)
+ return pci->ops->read_dbi(pci, pci->edma.reg_base, reg, 4);
+
+ ret = dw_pcie_read(pci->edma.reg_base + reg, 4, &val);
+ if (ret)
+ dev_err(pci->dev, "Read DMA address failed\n");
+
+ return val;
+}
+
+static int dw_pcie_edma_irq_vector(struct device *dev, unsigned int nr)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ char name[6];
+ int ret;
+
+ if (nr >= EDMA_MAX_WR_CH + EDMA_MAX_RD_CH)
+ return -EINVAL;
+
+ ret = platform_get_irq_byname_optional(pdev, "dma");
+ if (ret > 0)
+ return ret;
+
+ snprintf(name, sizeof(name), "dma%u", nr);
+
+ return platform_get_irq_byname_optional(pdev, name);
+}
+
+static struct dw_edma_plat_ops dw_pcie_edma_ops = {
+ .irq_vector = dw_pcie_edma_irq_vector,
+};
+
+static void dw_pcie_edma_init_data(struct dw_pcie *pci)
+{
+ pci->edma.dev = pci->dev;
+
+ if (!pci->edma.ops)
+ pci->edma.ops = &dw_pcie_edma_ops;
+
+ pci->edma.flags |= DW_EDMA_CHIP_LOCAL;
+}
+
+static int dw_pcie_edma_find_mf(struct dw_pcie *pci)
+{
+ u32 val;
+
+ /*
+ * Bail out finding the mapping format if it is already set by the glue
+ * driver. Also ensure that the edma.reg_base is pointing to a valid
+ * memory region.
+ */
+ if (pci->edma.mf != EDMA_MF_EDMA_LEGACY)
+ return pci->edma.reg_base ? 0 : -ENODEV;
+
+ /*
+ * Indirect eDMA CSR access has been completely removed since v5.40a,
+ * so no space is reserved for the eDMA channels viewport anymore and
+ * the former DMA CTRL register is no longer hardwired to all-Fs.
+ */
+ if (dw_pcie_ver_is_ge(pci, 540A))
+ val = 0xFFFFFFFF;
+ else
+ val = dw_pcie_readl_dbi(pci, PCIE_DMA_VIEWPORT_BASE + PCIE_DMA_CTRL);
+
+ if (val == 0xFFFFFFFF && pci->edma.reg_base) {
+ pci->edma.mf = EDMA_MF_EDMA_UNROLL;
+ } else if (val != 0xFFFFFFFF) {
+ pci->edma.mf = EDMA_MF_EDMA_LEGACY;
+
+ pci->edma.reg_base = pci->dbi_base + PCIE_DMA_VIEWPORT_BASE;
+ } else {
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
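
Glue drivers with a natively mapped HDMA block are expected to bypass this detection entirely; a hedged sketch (the CSR base pointer is an assumption):

	/* In the glue driver, before dw_pcie_edma_detect() */
	pci->edma.mf = EDMA_MF_HDMA_NATIVE;
	pci->edma.reg_base = foo->hdma_base;	/* vendor CSR region */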
+static int dw_pcie_edma_find_channels(struct dw_pcie *pci)
+{
+ u32 val;
+
+ /*
+ * Autodetect the read/write channels count only for non-HDMA platforms.
+ * HDMA platforms with native CSR mapping don't support autodetect,
+ * so the glue drivers should have passed a valid count already. If not,
+ * the below sanity check will catch it.
+ */
+ if (pci->edma.mf != EDMA_MF_HDMA_NATIVE) {
+ val = dw_pcie_readl_dma(pci, PCIE_DMA_CTRL);
+
+ pci->edma.ll_wr_cnt = FIELD_GET(PCIE_DMA_NUM_WR_CHAN, val);
+ pci->edma.ll_rd_cnt = FIELD_GET(PCIE_DMA_NUM_RD_CHAN, val);
+ }
+
+ /* Sanity check the channels count if the mapping was incorrect */
+ if (!pci->edma.ll_wr_cnt || pci->edma.ll_wr_cnt > EDMA_MAX_WR_CH ||
+ !pci->edma.ll_rd_cnt || pci->edma.ll_rd_cnt > EDMA_MAX_RD_CH)
+ return -EINVAL;
+
+ return 0;
+}
+
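
Continuing that sketch: since HDMA-native parts cannot be probed for channel counts, the glue driver supplies them up front (counts illustrative):

	/* HDMA native: no autodetect, pass the channel counts directly */
	pci->edma.ll_wr_cnt = 4;
	pci->edma.ll_rd_cnt = 4;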
+static int dw_pcie_edma_find_chip(struct dw_pcie *pci)
+{
+ int ret;
+
+ dw_pcie_edma_init_data(pci);
+
+ ret = dw_pcie_edma_find_mf(pci);
+ if (ret)
+ return ret;
+
+ return dw_pcie_edma_find_channels(pci);
+}
+
+static int dw_pcie_edma_irq_verify(struct dw_pcie *pci)
+{
+ struct platform_device *pdev = to_platform_device(pci->dev);
+ u16 ch_cnt = pci->edma.ll_wr_cnt + pci->edma.ll_rd_cnt;
+ char name[15];
+ int ret;
+
+ if (pci->edma.nr_irqs == 1)
+ return 0;
+ else if (pci->edma.nr_irqs > 1)
+ return pci->edma.nr_irqs != ch_cnt ? -EINVAL : 0;
+
+ ret = platform_get_irq_byname_optional(pdev, "dma");
+ if (ret > 0) {
+ pci->edma.nr_irqs = 1;
+ return 0;
+ }
+
+ for (; pci->edma.nr_irqs < ch_cnt; pci->edma.nr_irqs++) {
+ snprintf(name, sizeof(name), "dma%d", pci->edma.nr_irqs);
+
+ ret = platform_get_irq_byname_optional(pdev, name);
+ if (ret <= 0)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
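
The IRQ lookup accepts either one shared interrupt or one per channel; a glue driver can also pre-set the count to skip the lookup (sketch, names per the code above):

	/*
	 * Accepted devicetree interrupt-names:
	 *   "dma"                  - one IRQ shared by all channels
	 *   "dma0", "dma1", ...    - one IRQ per channel
	 */
	pci->edma.nr_irqs = 1;	/* e.g. a single muxed eDMA interrupt */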
+static int dw_pcie_edma_ll_alloc(struct dw_pcie *pci)
+{
+ struct dw_edma_region *ll;
+ dma_addr_t paddr;
+ int i;
+
+ for (i = 0; i < pci->edma.ll_wr_cnt; i++) {
+ ll = &pci->edma.ll_region_wr[i];
+ ll->sz = DMA_LLP_MEM_SIZE;
+ ll->vaddr.mem = dmam_alloc_coherent(pci->dev, ll->sz,
+ &paddr, GFP_KERNEL);
+ if (!ll->vaddr.mem)
+ return -ENOMEM;
+
+ ll->paddr = paddr;
+ }
+
+ for (i = 0; i < pci->edma.ll_rd_cnt; i++) {
+ ll = &pci->edma.ll_region_rd[i];
+ ll->sz = DMA_LLP_MEM_SIZE;
+ ll->vaddr.mem = dmam_alloc_coherent(pci->dev, ll->sz,
+ &paddr, GFP_KERNEL);
+ if (!ll->vaddr.mem)
+ return -ENOMEM;
+
+ ll->paddr = paddr;
+ }
+
+ return 0;
+}
+
+int dw_pcie_edma_detect(struct dw_pcie *pci)
+{
+ int ret;
+
+ /* Don't fail if no eDMA was found (for backward compatibility) */
+ ret = dw_pcie_edma_find_chip(pci);
+ if (ret)
+ return 0;
+
+ /* Don't fail on IRQ verification (for backward compatibility) */
+ ret = dw_pcie_edma_irq_verify(pci);
+ if (ret) {
+ dev_err(pci->dev, "Invalid eDMA IRQs found\n");
+ return 0;
+ }
+
+ ret = dw_pcie_edma_ll_alloc(pci);
+ if (ret) {
+ dev_err(pci->dev, "Couldn't allocate LLP memory\n");
+ return ret;
+ }
+
+ /* Don't fail if the DW eDMA driver can't find the device */
+ ret = dw_edma_probe(&pci->edma);
+ if (ret && ret != -ENODEV) {
+ dev_err(pci->dev, "Couldn't register eDMA device\n");
+ return ret;
+ }
+
+ dev_info(pci->dev, "eDMA: unroll %s, %hu wr, %hu rd\n",
+ pci->edma.mf == EDMA_MF_EDMA_UNROLL ? "T" : "F",
+ pci->edma.ll_wr_cnt, pci->edma.ll_rd_cnt);
+
+ return 0;
+}
+
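
Putting the pieces together, the intended probe/remove pairing looks roughly like this (foo_* hypothetical, error paths trimmed):

static int foo_pcie_probe(struct platform_device *pdev)
{
	struct foo_pcie *foo = foo_pcie_setup(pdev);	/* hypothetical */
	int ret;

	/* Tolerant by design: a controller without eDMA still probes */
	ret = dw_pcie_edma_detect(&foo->pci);
	if (ret)
		return ret;

	return 0;
}

static void foo_pcie_remove(struct platform_device *pdev)
{
	struct foo_pcie *foo = platform_get_drvdata(pdev);

	dw_pcie_edma_remove(&foo->pci);
}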
+void dw_pcie_edma_remove(struct dw_pcie *pci)
+{
+ dw_edma_remove(&pci->edma);
+}
+
+void dw_pcie_hide_unsupported_l1ss(struct dw_pcie *pci)
+{
+ u16 l1ss;
+ u32 l1ss_cap;
+
+ if (pci->l1ss_support)
+ return;
+
+ l1ss = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_L1SS);
+ if (!l1ss)
+ return;
+
+ /*
+ * Unless the driver claims "l1ss_support", don't advertise L1 PM
+ * Substates because they require CLKREQ# and possibly other
+ * device-specific configuration.
+ */
+ l1ss_cap = dw_pcie_readl_dbi(pci, l1ss + PCI_L1SS_CAP);
+ l1ss_cap &= ~(PCI_L1SS_CAP_PCIPM_L1_1 | PCI_L1SS_CAP_ASPM_L1_1 |
+ PCI_L1SS_CAP_PCIPM_L1_2 | PCI_L1SS_CAP_ASPM_L1_2 |
+ PCI_L1SS_CAP_L1_PM_SS);
+ dw_pcie_writel_dbi(pci, l1ss + PCI_L1SS_CAP, l1ss_cap);
+}
+
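
Glue drivers opt in by setting l1ss_support once they know CLKREQ# is usable; the rockchip host further below does exactly that, and a generic sketch is:

	/* Only advertise L1 PM Substates when CLKREQ# is actually wired */
	if (of_property_read_bool(np, "supports-clkreq"))
		pci->l1ss_support = true;

	/* Otherwise the L1SS capability bits get masked away */
	dw_pcie_hide_unsupported_l1ss(pci);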
void dw_pcie_setup(struct dw_pcie *pci)
{
u32 val;
- if (pci->link_gen > 0)
- dw_pcie_link_set_max_speed(pci, pci->link_gen);
+ dw_pcie_link_set_max_speed(pci);
/* Configure Gen1 N_FTS */
if (pci->n_fts[0]) {
@@ -806,11 +1130,6 @@ void dw_pcie_setup(struct dw_pcie *pci)
dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
}
- val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
- val &= ~PORT_LINK_FAST_LINK_MODE;
- val |= PORT_LINK_DLL_LINK_EN;
- dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);
-
if (dw_pcie_cap_is(pci, CDM_CHECK)) {
val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
val |= PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS |
@@ -818,49 +1137,70 @@ void dw_pcie_setup(struct dw_pcie *pci)
dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val);
}
- if (!pci->num_lanes) {
- dev_dbg(pci->dev, "Using h/w default number of lanes\n");
- return;
+ val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
+ val &= ~PORT_LINK_FAST_LINK_MODE;
+ val |= PORT_LINK_DLL_LINK_EN;
+ dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);
+
+ dw_pcie_link_set_max_link_width(pci, pci->num_lanes);
+}
+
+resource_size_t dw_pcie_parent_bus_offset(struct dw_pcie *pci,
+ const char *reg_name,
+ resource_size_t cpu_phys_addr)
+{
+ struct device *dev = pci->dev;
+ struct device_node *np = dev->of_node;
+ int index;
+ u64 reg_addr, fixup_addr;
+ u64 (*fixup)(struct dw_pcie *pcie, u64 cpu_addr);
+
+ /* Look up reg_name address on parent bus */
+ index = of_property_match_string(np, "reg-names", reg_name);
+
+ if (index < 0) {
+ dev_err(dev, "No %s in devicetree \"reg\" property\n", reg_name);
+ return 0;
}
- /* Set the number of lanes */
- val &= ~PORT_LINK_FAST_LINK_MODE;
- val &= ~PORT_LINK_MODE_MASK;
- switch (pci->num_lanes) {
- case 1:
- val |= PORT_LINK_MODE_1_LANES;
- break;
- case 2:
- val |= PORT_LINK_MODE_2_LANES;
- break;
- case 4:
- val |= PORT_LINK_MODE_4_LANES;
- break;
- case 8:
- val |= PORT_LINK_MODE_8_LANES;
- break;
- default:
- dev_err(pci->dev, "num-lanes %u: invalid value\n", pci->num_lanes);
- return;
+ of_property_read_reg(np, index, &reg_addr, NULL);
+
+ fixup = pci->ops ? pci->ops->cpu_addr_fixup : NULL;
+ if (fixup) {
+ fixup_addr = fixup(pci, cpu_phys_addr);
+ if (reg_addr == fixup_addr) {
+ dev_info(dev, "%s reg[%d] %#010llx == %#010llx == fixup(cpu %#010llx); %ps is redundant with this devicetree\n",
+ reg_name, index, reg_addr, fixup_addr,
+ (unsigned long long) cpu_phys_addr, fixup);
+ } else {
+ dev_warn(dev, "%s reg[%d] %#010llx != %#010llx == fixup(cpu %#010llx); devicetree is broken\n",
+ reg_name, index, reg_addr, fixup_addr,
+ (unsigned long long) cpu_phys_addr);
+ reg_addr = fixup_addr;
+ }
+
+ return cpu_phys_addr - reg_addr;
}
- dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);
- /* Set link width speed control register */
- val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
- val &= ~PORT_LOGIC_LINK_WIDTH_MASK;
- switch (pci->num_lanes) {
- case 1:
- val |= PORT_LOGIC_LINK_WIDTH_1_LANES;
- break;
- case 2:
- val |= PORT_LOGIC_LINK_WIDTH_2_LANES;
- break;
- case 4:
- val |= PORT_LOGIC_LINK_WIDTH_4_LANES;
- break;
- case 8:
- val |= PORT_LOGIC_LINK_WIDTH_8_LANES;
- break;
+ if (pci->use_parent_dt_ranges) {
+
+ /*
+ * This platform once had a fixup, presumably because it
+ * translates between CPU and PCI controller addresses.
+ * Log a note if devicetree didn't describe a translation.
+ */
+ if (reg_addr == cpu_phys_addr)
+ dev_info(dev, "%s reg[%d] %#010llx == cpu %#010llx\n; no fixup was ever needed for this devicetree\n",
+ reg_name, index, reg_addr,
+ (unsigned long long) cpu_phys_addr);
+ } else {
+ if (reg_addr != cpu_phys_addr) {
+ dev_warn(dev, "%s reg[%d] %#010llx != cpu %#010llx; no fixup and devicetree \"ranges\" is broken, assuming no translation\n",
+ reg_name, index, reg_addr,
+ (unsigned long long) cpu_phys_addr);
+ return 0;
+ }
}
- dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
+
+ return cpu_phys_addr - reg_addr;
}
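
A worked example with made-up addresses: if devicetree places the "dbi" registers at parent-bus 0x0_4000_0000 while the CPU physical address is 0x10_4000_0000, every address handed to the iATU must be shifted by the difference:

	/* Illustrative numbers only */
	resource_size_t cpu_addr = 0x1040000000ULL;	/* CPU view */
	resource_size_t reg_addr = 0x0040000000ULL;	/* parent-bus view */

	pci->parent_bus_offset = cpu_addr - reg_addr;	/* 0x1000000000 */
	/* Later: iATU input = some_cpu_addr - pci->parent_bus_offset */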
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index 393dfb931df6..31685951a080 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -15,15 +15,19 @@
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
+#include <linux/dma/edma.h>
#include <linux/gpio/consumer.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/pci.h>
+#include <linux/pci-ecam.h>
#include <linux/reset.h>
#include <linux/pci-epc.h>
#include <linux/pci-epf.h>
+#include "../../pci.h"
+
/* DWC PCIe IP-core versions (native support since v4.70a) */
#define DW_PCIE_VER_365A 0x3336352a
#define DW_PCIE_VER_460A 0x3436302a
@@ -31,6 +35,7 @@
#define DW_PCIE_VER_480A 0x3438302a
#define DW_PCIE_VER_490A 0x3439302a
#define DW_PCIE_VER_520A 0x3532302a
+#define DW_PCIE_VER_540A 0x3534302a
#define __dw_pcie_ver_cmp(_pci, _ver, _op) \
((_pci)->version _op DW_PCIE_VER_ ## _ver)
@@ -58,16 +63,14 @@
#define dw_pcie_cap_set(_pci, _cap) \
set_bit(DW_PCIE_CAP_ ## _cap, &(_pci)->caps)
-/* Parameters for the waiting for link up routine */
-#define LINK_WAIT_MAX_RETRIES 10
-#define LINK_WAIT_USLEEP_MIN 90000
-#define LINK_WAIT_USLEEP_MAX 100000
-
/* Parameters for the waiting for iATU enabled routine */
#define LINK_WAIT_MAX_IATU_RETRIES 5
#define LINK_WAIT_IATU 9
/* Synopsys-specific PCIe configuration registers */
+#define PCIE_PORT_FORCE 0x708
+#define PORT_FORCE_DO_DESKEW_FOR_SRIS BIT(23)
+
#define PCIE_PORT_AFR 0x70C
#define PORT_AFR_N_FTS_MASK GENMASK(15, 8)
#define PORT_AFR_N_FTS(n) FIELD_PREP(PORT_AFR_N_FTS_MASK, n)
@@ -88,9 +91,13 @@
#define PORT_LINK_MODE_2_LANES PORT_LINK_MODE(0x3)
#define PORT_LINK_MODE_4_LANES PORT_LINK_MODE(0x7)
#define PORT_LINK_MODE_8_LANES PORT_LINK_MODE(0xf)
+#define PORT_LINK_MODE_16_LANES PORT_LINK_MODE(0x1f)
+
+#define PCIE_PORT_LANE_SKEW 0x714
+#define PORT_LANE_SKEW_INSERT_MASK GENMASK(23, 0)
#define PCIE_PORT_DEBUG0 0x728
-#define PORT_LOGIC_LTSSM_STATE_MASK 0x1f
+#define PORT_LOGIC_LTSSM_STATE_MASK 0x3f
#define PORT_LOGIC_LTSSM_STATE_L0 0x11
#define PCIE_PORT_DEBUG1 0x72C
#define PCIE_PORT_DEBUG1_LINK_UP BIT(4)
@@ -114,11 +121,31 @@
#define GEN3_RELATED_OFF 0x890
#define GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL BIT(0)
+#define GEN3_RELATED_OFF_EQ_PHASE_2_3 BIT(9)
#define GEN3_RELATED_OFF_RXEQ_RGRDLESS_RXTS BIT(13)
#define GEN3_RELATED_OFF_GEN3_EQ_DISABLE BIT(16)
#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_SHIFT 24
#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK GENMASK(25, 24)
+#define GEN3_EQ_CONTROL_OFF 0x8A8
+#define GEN3_EQ_CONTROL_OFF_FB_MODE GENMASK(3, 0)
+#define GEN3_EQ_CONTROL_OFF_PHASE23_EXIT_MODE BIT(4)
+#define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC GENMASK(23, 8)
+#define GEN3_EQ_CONTROL_OFF_FOM_INC_INITIAL_EVAL BIT(24)
+
+#define GEN3_EQ_FB_MODE_DIR_CHANGE_OFF 0x8AC
+#define GEN3_EQ_FMDC_T_MIN_PHASE23 GENMASK(4, 0)
+#define GEN3_EQ_FMDC_N_EVALS GENMASK(9, 5)
+#define GEN3_EQ_FMDC_MAX_PRE_CURSOR_DELTA GENMASK(13, 10)
+#define GEN3_EQ_FMDC_MAX_POST_CURSOR_DELTA GENMASK(17, 14)
+
+#define COHERENCY_CONTROL_1_OFF 0x8E0
+#define CFG_MEMTYPE_BOUNDARY_LOW_ADDR_MASK GENMASK(31, 2)
+#define CFG_MEMTYPE_VALUE BIT(0)
+
+#define COHERENCY_CONTROL_2_OFF 0x8E4
+#define COHERENCY_CONTROL_3_OFF 0x8E8
+
#define PCIE_PORT_MULTI_LANE_CTRL 0x8C0
#define PORT_MLTI_UPCFG_SUPPORT BIT(7)
@@ -145,11 +172,14 @@
#define PCIE_ATU_TYPE_IO 0x2
#define PCIE_ATU_TYPE_CFG0 0x4
#define PCIE_ATU_TYPE_CFG1 0x5
+#define PCIE_ATU_TYPE_MSG 0x10
#define PCIE_ATU_TD BIT(8)
#define PCIE_ATU_FUNC_NUM(pf) ((pf) << 20)
#define PCIE_ATU_REGION_CTRL2 0x004
#define PCIE_ATU_ENABLE BIT(31)
#define PCIE_ATU_BAR_MODE_ENABLE BIT(30)
+#define PCIE_ATU_CFG_SHIFT_MODE_ENABLE BIT(28)
+#define PCIE_ATU_INHIBIT_PAYLOAD BIT(22)
#define PCIE_ATU_FUNC_NUM_MATCH_EN BIT(19)
#define PCIE_ATU_LOWER_BASE 0x008
#define PCIE_ATU_UPPER_BASE 0x00C
@@ -167,6 +197,18 @@
#define PCIE_MSIX_DOORBELL 0x948
#define PCIE_MSIX_DOORBELL_PF_SHIFT 24
+/*
+ * eDMA CSRs. DW PCIe IP-core v4.70a and older had the eDMA registers accessible
+ * over the Port Logic registers space. Afterwards the unrolled mapping was
+ * introduced so eDMA and iATU could be accessed via a dedicated registers
+ * space.
+ */
+#define PCIE_DMA_VIEWPORT_BASE 0x970
+#define PCIE_DMA_UNROLL_BASE 0x80000
+#define PCIE_DMA_CTRL 0x008
+#define PCIE_DMA_NUM_WR_CHAN GENMASK(3, 0)
+#define PCIE_DMA_NUM_RD_CHAN GENMASK(19, 16)
+
#define PCIE_PL_CHK_REG_CONTROL_STATUS 0xB20
#define PCIE_PL_CHK_REG_CHK_REG_START BIT(0)
#define PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS BIT(1)
@@ -177,6 +219,24 @@
#define PCIE_PL_CHK_REG_ERR_ADDR 0xB28
/*
+ * 16.0 GT/s (Gen 4) lane margining register definitions
+ */
+#define GEN4_LANE_MARGINING_1_OFF 0xB80
+#define MARGINING_MAX_VOLTAGE_OFFSET GENMASK(29, 24)
+#define MARGINING_NUM_VOLTAGE_STEPS GENMASK(22, 16)
+#define MARGINING_MAX_TIMING_OFFSET GENMASK(13, 8)
+#define MARGINING_NUM_TIMING_STEPS GENMASK(5, 0)
+
+#define GEN4_LANE_MARGINING_2_OFF 0xB84
+#define MARGINING_IND_ERROR_SAMPLER BIT(28)
+#define MARGINING_SAMPLE_REPORTING_METHOD BIT(27)
+#define MARGINING_IND_LEFT_RIGHT_TIMING BIT(26)
+#define MARGINING_IND_UP_DOWN_VOLTAGE BIT(25)
+#define MARGINING_VOLTAGE_SUPPORTED BIT(24)
+#define MARGINING_MAXLANES GENMASK(20, 16)
+#define MARGINING_SAMPLE_RATE_TIMING GENMASK(13, 8)
+#define MARGINING_SAMPLE_RATE_VOLTAGE GENMASK(5, 0)
+/*
* iATU Unroll-specific register definitions
* From 4.80 core version the address translation will be made by unroll
*/
@@ -208,6 +268,21 @@
#define PCIE_RAS_DES_EVENT_COUNTER_DATA 0xc
+/* PTM register definitions */
+#define PTM_RES_REQ_CTRL 0x8
+#define PTM_RES_CCONTEXT_VALID BIT(0)
+#define PTM_REQ_AUTO_UPDATE_ENABLED BIT(0)
+#define PTM_REQ_START_UPDATE BIT(1)
+
+#define PTM_LOCAL_LSB 0x10
+#define PTM_LOCAL_MSB 0x14
+#define PTM_T1_T2_LSB 0x18
+#define PTM_T1_T2_MSB 0x1c
+#define PTM_T3_T4_LSB 0x28
+#define PTM_T3_T4_MSB 0x2c
+#define PTM_MASTER_LSB 0x38
+#define PTM_MASTER_MSB 0x3c
+
/*
* The default address offset between dbi_base and atu_base. Root controller
* drivers are not required to initialize atu_base if the offset matches this
@@ -215,6 +290,7 @@
* this offset, if atu_base not set.
*/
#define DEFAULT_DBI_ATU_OFFSET (0x3 << 20)
+#define DEFAULT_DBI_DMA_OFFSET PCIE_DMA_UNROLL_BASE
#define MAX_MSI_IRQS 256
#define MAX_MSI_IRQS_PER_CTRL 32
@@ -226,6 +302,9 @@
#define MAX_IATU_IN 256
#define MAX_IATU_OUT 256
+/* Default eDMA LLP memory size */
+#define DMA_LLP_MEM_SIZE PAGE_SIZE
+
struct dw_pcie;
struct dw_pcie_rp;
struct dw_pcie_ep;
@@ -270,10 +349,66 @@ enum dw_pcie_core_rst {
DW_PCIE_NUM_CORE_RSTS
};
+enum dw_pcie_ltssm {
+ /* Need to align with PCIE_PORT_DEBUG0 bits 0:5 */
+ DW_PCIE_LTSSM_DETECT_QUIET = 0x0,
+ DW_PCIE_LTSSM_DETECT_ACT = 0x1,
+ DW_PCIE_LTSSM_POLL_ACTIVE = 0x2,
+ DW_PCIE_LTSSM_POLL_COMPLIANCE = 0x3,
+ DW_PCIE_LTSSM_POLL_CONFIG = 0x4,
+ DW_PCIE_LTSSM_PRE_DETECT_QUIET = 0x5,
+ DW_PCIE_LTSSM_DETECT_WAIT = 0x6,
+ DW_PCIE_LTSSM_CFG_LINKWD_START = 0x7,
+ DW_PCIE_LTSSM_CFG_LINKWD_ACEPT = 0x8,
+ DW_PCIE_LTSSM_CFG_LANENUM_WAI = 0x9,
+ DW_PCIE_LTSSM_CFG_LANENUM_ACEPT = 0xa,
+ DW_PCIE_LTSSM_CFG_COMPLETE = 0xb,
+ DW_PCIE_LTSSM_CFG_IDLE = 0xc,
+ DW_PCIE_LTSSM_RCVRY_LOCK = 0xd,
+ DW_PCIE_LTSSM_RCVRY_SPEED = 0xe,
+ DW_PCIE_LTSSM_RCVRY_RCVRCFG = 0xf,
+ DW_PCIE_LTSSM_RCVRY_IDLE = 0x10,
+ DW_PCIE_LTSSM_L0 = 0x11,
+ DW_PCIE_LTSSM_L0S = 0x12,
+ DW_PCIE_LTSSM_L123_SEND_EIDLE = 0x13,
+ DW_PCIE_LTSSM_L1_IDLE = 0x14,
+ DW_PCIE_LTSSM_L2_IDLE = 0x15,
+ DW_PCIE_LTSSM_L2_WAKE = 0x16,
+ DW_PCIE_LTSSM_DISABLED_ENTRY = 0x17,
+ DW_PCIE_LTSSM_DISABLED_IDLE = 0x18,
+ DW_PCIE_LTSSM_DISABLED = 0x19,
+ DW_PCIE_LTSSM_LPBK_ENTRY = 0x1a,
+ DW_PCIE_LTSSM_LPBK_ACTIVE = 0x1b,
+ DW_PCIE_LTSSM_LPBK_EXIT = 0x1c,
+ DW_PCIE_LTSSM_LPBK_EXIT_TIMEOUT = 0x1d,
+ DW_PCIE_LTSSM_HOT_RESET_ENTRY = 0x1e,
+ DW_PCIE_LTSSM_HOT_RESET = 0x1f,
+ DW_PCIE_LTSSM_RCVRY_EQ0 = 0x20,
+ DW_PCIE_LTSSM_RCVRY_EQ1 = 0x21,
+ DW_PCIE_LTSSM_RCVRY_EQ2 = 0x22,
+ DW_PCIE_LTSSM_RCVRY_EQ3 = 0x23,
+
+ DW_PCIE_LTSSM_UNKNOWN = 0xFFFFFFFF,
+};
+
+struct dw_pcie_ob_atu_cfg {
+ int index;
+ int type;
+ u8 func_no;
+ u8 code;
+ u8 routing;
+ u32 ctrl2;
+ u64 parent_bus_addr;
+ u64 pci_addr;
+ u64 size;
+};
+
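
With the former positional dw_pcie_prog_outbound_atu() arguments folded into this struct, a MEM-window caller now reads roughly like (values illustrative):

	struct dw_pcie_ob_atu_cfg atu = { 0 };

	atu.index = 0;
	atu.type = PCIE_ATU_TYPE_MEM;
	atu.parent_bus_addr = cpu_addr - pci->parent_bus_offset;
	atu.pci_addr = pci_addr;
	atu.size = size;

	ret = dw_pcie_prog_outbound_atu(pci, &atu);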
struct dw_pcie_host_ops {
- int (*host_init)(struct dw_pcie_rp *pp);
- void (*host_deinit)(struct dw_pcie_rp *pp);
- int (*msi_host_init)(struct dw_pcie_rp *pp);
+ int (*init)(struct dw_pcie_rp *pp);
+ void (*deinit)(struct dw_pcie_rp *pp);
+ void (*post_init)(struct dw_pcie_rp *pp);
+ int (*msi_init)(struct dw_pcie_rp *pp);
+ void (*pme_turn_off)(struct dw_pcie_rp *pp);
};
struct dw_pcie_rp {
@@ -289,7 +424,6 @@ struct dw_pcie_rp {
const struct dw_pcie_host_ops *ops;
int msi_irq[MAX_MSI_CTRLS];
struct irq_domain *irq_domain;
- struct irq_domain *msi_domain;
dma_addr_t msi_data;
struct irq_chip *msi_irq_chip;
u32 num_vectors;
@@ -297,12 +431,21 @@ struct dw_pcie_rp {
struct pci_host_bridge *bridge;
raw_spinlock_t lock;
DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_IRQS);
+ bool use_atu_msg;
+ int msg_atu_index;
+ struct resource *msg_res;
+ bool use_linkup_irq;
+ struct pci_eq_presets presets;
+ struct pci_config_window *cfg;
+ bool ecam_enabled;
+ bool native_ecam;
};
struct dw_pcie_ep_ops {
- void (*ep_init)(struct dw_pcie_ep *ep);
+ void (*pre_init)(struct dw_pcie_ep *ep);
+ void (*init)(struct dw_pcie_ep *ep);
int (*raise_irq)(struct dw_pcie_ep *ep, u8 func_no,
- enum pci_epc_irq_type type, u16 interrupt_num);
+ unsigned int type, u16 interrupt_num);
const struct pci_epc_features* (*get_features)(struct dw_pcie_ep *ep);
/*
* Provide a method to implement the different func config space
@@ -311,7 +454,8 @@ struct dw_pcie_ep_ops {
* return a 0, and implement code in callback function of platform
* driver.
*/
- unsigned int (*func_conf_select)(struct dw_pcie_ep *ep, u8 func_no);
+ unsigned int (*get_dbi_offset)(struct dw_pcie_ep *ep, u8 func_no);
+ unsigned int (*get_dbi2_offset)(struct dw_pcie_ep *ep, u8 func_no);
};
struct dw_pcie_ep_func {
@@ -345,17 +489,28 @@ struct dw_pcie_ops {
size_t size, u32 val);
void (*write_dbi2)(struct dw_pcie *pcie, void __iomem *base, u32 reg,
size_t size, u32 val);
- int (*link_up)(struct dw_pcie *pcie);
+ bool (*link_up)(struct dw_pcie *pcie);
+ enum dw_pcie_ltssm (*get_ltssm)(struct dw_pcie *pcie);
int (*start_link)(struct dw_pcie *pcie);
void (*stop_link)(struct dw_pcie *pcie);
+ int (*assert_perst)(struct dw_pcie *pcie, bool assert);
+};
+
+struct debugfs_info {
+ struct dentry *debug_dir;
+ void *rasdes_info;
};
struct dw_pcie {
struct device *dev;
void __iomem *dbi_base;
+ resource_size_t dbi_phys_addr;
void __iomem *dbi_base2;
void __iomem *atu_base;
+ void __iomem *elbi_base;
+ resource_size_t atu_phys_addr;
size_t atu_size;
+ resource_size_t parent_bus_offset;
u32 num_ib_windows;
u32 num_ob_windows;
u32 region_align;
@@ -367,13 +522,33 @@ struct dw_pcie {
u32 type;
unsigned long caps;
int num_lanes;
- int link_gen;
+ int max_link_speed;
u8 n_fts[2];
+ struct dw_edma_chip edma;
+ bool l1ss_support; /* L1 PM Substates support */
struct clk_bulk_data app_clks[DW_PCIE_NUM_APP_CLKS];
struct clk_bulk_data core_clks[DW_PCIE_NUM_CORE_CLKS];
struct reset_control_bulk_data app_rsts[DW_PCIE_NUM_APP_RSTS];
struct reset_control_bulk_data core_rsts[DW_PCIE_NUM_CORE_RSTS];
struct gpio_desc *pe_rst;
+ bool suspended;
+ struct debugfs_info *debugfs;
+ enum dw_pcie_device_mode mode;
+ u16 ptm_vsec_offset;
+ struct pci_ptm_debugfs *ptm_debugfs;
+
+ /*
+ * If iATU input addresses are offset from CPU physical addresses,
+ * we previously required .cpu_addr_fixup() to convert them. We
+ * now rely on the devicetree instead. If .cpu_addr_fixup()
+ * exists, we compare its results with devicetree.
+ *
+ * If .cpu_addr_fixup() does not exist, we assume the offset is
+ * zero and warn if devicetree claims otherwise. If we know all
+ * devicetrees correctly describe the offset, set
+ * use_parent_dt_ranges to true to avoid this warning.
+ */
+ bool use_parent_dt_ranges;
};
#define to_dw_pcie_from_pp(port) container_of((port), struct dw_pcie, pp)
@@ -387,6 +562,8 @@ void dw_pcie_version_detect(struct dw_pcie *pci);
u8 dw_pcie_find_capability(struct dw_pcie *pci, u8 cap);
u16 dw_pcie_find_ext_capability(struct dw_pcie *pci, u8 cap);
+u16 dw_pcie_find_rasdes_capability(struct dw_pcie *pci);
+u16 dw_pcie_find_ptm_capability(struct dw_pcie *pci);
int dw_pcie_read(void __iomem *addr, int size, u32 *val);
int dw_pcie_write(void __iomem *addr, int size, u32 val);
@@ -394,20 +571,26 @@ int dw_pcie_write(void __iomem *addr, int size, u32 val);
u32 dw_pcie_read_dbi(struct dw_pcie *pci, u32 reg, size_t size);
void dw_pcie_write_dbi(struct dw_pcie *pci, u32 reg, size_t size, u32 val);
void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val);
-int dw_pcie_link_up(struct dw_pcie *pci);
+bool dw_pcie_link_up(struct dw_pcie *pci);
void dw_pcie_upconfig_setup(struct dw_pcie *pci);
int dw_pcie_wait_for_link(struct dw_pcie *pci);
-int dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
- u64 cpu_addr, u64 pci_addr, u64 size);
-int dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, u8 func_no, int index,
- int type, u64 cpu_addr, u64 pci_addr, u64 size);
+int dw_pcie_link_get_max_link_width(struct dw_pcie *pci);
+int dw_pcie_prog_outbound_atu(struct dw_pcie *pci,
+ const struct dw_pcie_ob_atu_cfg *atu);
int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int type,
- u64 cpu_addr, u64 pci_addr, u64 size);
+ u64 parent_bus_addr, u64 pci_addr, u64 size);
int dw_pcie_prog_ep_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
- int type, u64 cpu_addr, u8 bar);
+ int type, u64 parent_bus_addr,
+ u8 bar, size_t size);
void dw_pcie_disable_atu(struct dw_pcie *pci, u32 dir, int index);
+void dw_pcie_hide_unsupported_l1ss(struct dw_pcie *pci);
void dw_pcie_setup(struct dw_pcie *pci);
void dw_pcie_iatu_detect(struct dw_pcie *pci);
+int dw_pcie_edma_detect(struct dw_pcie *pci);
+void dw_pcie_edma_remove(struct dw_pcie *pci);
+resource_size_t dw_pcie_parent_bus_offset(struct dw_pcie *pci,
+ const char *reg_name,
+ resource_size_t cpu_phy_addr);
static inline void dw_pcie_writel_dbi(struct dw_pcie *pci, u32 reg, u32 val)
{
@@ -444,6 +627,141 @@ static inline void dw_pcie_writel_dbi2(struct dw_pcie *pci, u32 reg, u32 val)
dw_pcie_write_dbi2(pci, reg, 0x4, val);
}
+static inline int dw_pcie_read_cfg_byte(struct dw_pcie *pci, int where,
+ u8 *val)
+{
+ *val = dw_pcie_readb_dbi(pci, where);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static inline int dw_pcie_read_cfg_word(struct dw_pcie *pci, int where,
+ u16 *val)
+{
+ *val = dw_pcie_readw_dbi(pci, where);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static inline int dw_pcie_read_cfg_dword(struct dw_pcie *pci, int where,
+ u32 *val)
+{
+ *val = dw_pcie_readl_dbi(pci, where);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static inline unsigned int dw_pcie_ep_get_dbi_offset(struct dw_pcie_ep *ep,
+ u8 func_no)
+{
+ unsigned int dbi_offset = 0;
+
+ if (ep->ops->get_dbi_offset)
+ dbi_offset = ep->ops->get_dbi_offset(ep, func_no);
+
+ return dbi_offset;
+}
+
+static inline u32 dw_pcie_ep_read_dbi(struct dw_pcie_ep *ep, u8 func_no,
+ u32 reg, size_t size)
+{
+ unsigned int offset = dw_pcie_ep_get_dbi_offset(ep, func_no);
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ return dw_pcie_read_dbi(pci, offset + reg, size);
+}
+
+static inline void dw_pcie_ep_write_dbi(struct dw_pcie_ep *ep, u8 func_no,
+ u32 reg, size_t size, u32 val)
+{
+ unsigned int offset = dw_pcie_ep_get_dbi_offset(ep, func_no);
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ dw_pcie_write_dbi(pci, offset + reg, size, val);
+}
+
+static inline void dw_pcie_ep_writel_dbi(struct dw_pcie_ep *ep, u8 func_no,
+ u32 reg, u32 val)
+{
+ dw_pcie_ep_write_dbi(ep, func_no, reg, 0x4, val);
+}
+
+static inline u32 dw_pcie_ep_readl_dbi(struct dw_pcie_ep *ep, u8 func_no,
+ u32 reg)
+{
+ return dw_pcie_ep_read_dbi(ep, func_no, reg, 0x4);
+}
+
+static inline void dw_pcie_ep_writew_dbi(struct dw_pcie_ep *ep, u8 func_no,
+ u32 reg, u16 val)
+{
+ dw_pcie_ep_write_dbi(ep, func_no, reg, 0x2, val);
+}
+
+static inline u16 dw_pcie_ep_readw_dbi(struct dw_pcie_ep *ep, u8 func_no,
+ u32 reg)
+{
+ return dw_pcie_ep_read_dbi(ep, func_no, reg, 0x2);
+}
+
+static inline void dw_pcie_ep_writeb_dbi(struct dw_pcie_ep *ep, u8 func_no,
+ u32 reg, u8 val)
+{
+ dw_pcie_ep_write_dbi(ep, func_no, reg, 0x1, val);
+}
+
+static inline u8 dw_pcie_ep_readb_dbi(struct dw_pcie_ep *ep, u8 func_no,
+ u32 reg)
+{
+ return dw_pcie_ep_read_dbi(ep, func_no, reg, 0x1);
+}
+
+static inline int dw_pcie_ep_read_cfg_byte(struct dw_pcie_ep *ep, u8 func_no,
+ int where, u8 *val)
+{
+ *val = dw_pcie_ep_readb_dbi(ep, func_no, where);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static inline int dw_pcie_ep_read_cfg_word(struct dw_pcie_ep *ep, u8 func_no,
+ int where, u16 *val)
+{
+ *val = dw_pcie_ep_readw_dbi(ep, func_no, where);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static inline int dw_pcie_ep_read_cfg_dword(struct dw_pcie_ep *ep, u8 func_no,
+ int where, u32 *val)
+{
+ *val = dw_pcie_ep_readl_dbi(ep, func_no, where);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static inline unsigned int dw_pcie_ep_get_dbi2_offset(struct dw_pcie_ep *ep,
+ u8 func_no)
+{
+ unsigned int dbi2_offset = 0;
+
+ if (ep->ops->get_dbi2_offset)
+ dbi2_offset = ep->ops->get_dbi2_offset(ep, func_no);
+ else if (ep->ops->get_dbi_offset) /* for backward compatibility */
+ dbi2_offset = ep->ops->get_dbi_offset(ep, func_no);
+
+ return dbi2_offset;
+}
+
+static inline void dw_pcie_ep_write_dbi2(struct dw_pcie_ep *ep, u8 func_no,
+ u32 reg, size_t size, u32 val)
+{
+ unsigned int offset = dw_pcie_ep_get_dbi2_offset(ep, func_no);
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ dw_pcie_write_dbi2(pci, offset + reg, size, val);
+}
+
+static inline void dw_pcie_ep_writel_dbi2(struct dw_pcie_ep *ep, u8 func_no,
+ u32 reg, u32 val)
+{
+ dw_pcie_ep_write_dbi2(ep, func_no, reg, 0x4, val);
+}
+
static inline void dw_pcie_dbi_ro_wr_en(struct dw_pcie *pci)
{
u32 reg;
@@ -480,8 +798,33 @@ static inline void dw_pcie_stop_link(struct dw_pcie *pci)
pci->ops->stop_link(pci);
}
+static inline int dw_pcie_assert_perst(struct dw_pcie *pci, bool assert)
+{
+ if (pci->ops && pci->ops->assert_perst)
+ return pci->ops->assert_perst(pci, assert);
+
+ return 0;
+}
+
+static inline enum dw_pcie_ltssm dw_pcie_get_ltssm(struct dw_pcie *pci)
+{
+ u32 val;
+
+ if (pci->ops && pci->ops->get_ltssm)
+ return pci->ops->get_ltssm(pci);
+
+ val = dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0);
+
+ return (enum dw_pcie_ltssm)FIELD_GET(PORT_LOGIC_LTSSM_STATE_MASK, val);
+}
+
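
A small sketch of the accessor in use, e.g. polling for L0 after releasing PERST# (the retry budget is an assumption):

static int foo_pcie_wait_l0(struct dw_pcie *pci)
{
	unsigned int retries;

	for (retries = 0; retries < 100; retries++) {
		if (dw_pcie_get_ltssm(pci) == DW_PCIE_LTSSM_L0)
			return 0;
		msleep(1);
	}

	return -ETIMEDOUT;
}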
#ifdef CONFIG_PCIE_DW_HOST
+int dw_pcie_suspend_noirq(struct dw_pcie *pci);
+int dw_pcie_resume_noirq(struct dw_pcie *pci);
irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp);
+void dw_pcie_msi_init(struct dw_pcie_rp *pp);
+int dw_pcie_msi_host_init(struct dw_pcie_rp *pp);
+void dw_pcie_free_msi(struct dw_pcie_rp *pp);
int dw_pcie_setup_rc(struct dw_pcie_rp *pp);
int dw_pcie_host_init(struct dw_pcie_rp *pp);
void dw_pcie_host_deinit(struct dw_pcie_rp *pp);
@@ -489,11 +832,32 @@ int dw_pcie_allocate_domains(struct dw_pcie_rp *pp);
void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus, unsigned int devfn,
int where);
#else
+static inline int dw_pcie_suspend_noirq(struct dw_pcie *pci)
+{
+ return 0;
+}
+
+static inline int dw_pcie_resume_noirq(struct dw_pcie *pci)
+{
+ return 0;
+}
+
static inline irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp)
{
return IRQ_NONE;
}
+static inline void dw_pcie_msi_init(struct dw_pcie_rp *pp)
+{ }
+
+static inline int dw_pcie_msi_host_init(struct dw_pcie_rp *pp)
+{
+ return -ENODEV;
+}
+
+static inline void dw_pcie_free_msi(struct dw_pcie_rp *pp)
+{ }
+
static inline int dw_pcie_setup_rc(struct dw_pcie_rp *pp)
{
return 0;
@@ -522,11 +886,12 @@ static inline void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus,
#ifdef CONFIG_PCIE_DW_EP
void dw_pcie_ep_linkup(struct dw_pcie_ep *ep);
+void dw_pcie_ep_linkdown(struct dw_pcie_ep *ep);
int dw_pcie_ep_init(struct dw_pcie_ep *ep);
-int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep);
-void dw_pcie_ep_init_notify(struct dw_pcie_ep *ep);
-void dw_pcie_ep_exit(struct dw_pcie_ep *ep);
-int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no);
+int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep);
+void dw_pcie_ep_deinit(struct dw_pcie_ep *ep);
+void dw_pcie_ep_cleanup(struct dw_pcie_ep *ep);
+int dw_pcie_ep_raise_intx_irq(struct dw_pcie_ep *ep, u8 func_no);
int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
u8 interrupt_num);
int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
@@ -534,6 +899,7 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
int dw_pcie_ep_raise_msix_irq_doorbell(struct dw_pcie_ep *ep, u8 func_no,
u16 interrupt_num);
void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar);
+int dw_pcie_ep_hide_ext_capability(struct dw_pcie *pci, u8 prev_cap, u8 cap);
struct dw_pcie_ep_func *
dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep *ep, u8 func_no);
#else
@@ -541,25 +907,29 @@ static inline void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
{
}
+static inline void dw_pcie_ep_linkdown(struct dw_pcie_ep *ep)
+{
+}
+
static inline int dw_pcie_ep_init(struct dw_pcie_ep *ep)
{
return 0;
}
-static inline int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep)
+static inline int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep)
{
return 0;
}
-static inline void dw_pcie_ep_init_notify(struct dw_pcie_ep *ep)
+static inline void dw_pcie_ep_deinit(struct dw_pcie_ep *ep)
{
}
-static inline void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
+static inline void dw_pcie_ep_cleanup(struct dw_pcie_ep *ep)
{
}
-static inline int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no)
+static inline int dw_pcie_ep_raise_intx_irq(struct dw_pcie_ep *ep, u8 func_no)
{
return 0;
}
@@ -587,10 +957,30 @@ static inline void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
{
}
+static inline int dw_pcie_ep_hide_ext_capability(struct dw_pcie *pci,
+ u8 prev_cap, u8 cap)
+{
+ return 0;
+}
+
static inline struct dw_pcie_ep_func *
dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep *ep, u8 func_no)
{
return NULL;
}
#endif
+
+#ifdef CONFIG_PCIE_DW_DEBUGFS
+void dwc_pcie_debugfs_init(struct dw_pcie *pci, enum dw_pcie_device_mode mode);
+void dwc_pcie_debugfs_deinit(struct dw_pcie *pci);
+#else
+static inline void dwc_pcie_debugfs_init(struct dw_pcie *pci,
+ enum dw_pcie_device_mode mode)
+{
+}
+static inline void dwc_pcie_debugfs_deinit(struct dw_pcie *pci)
+{
+}
+#endif
+
#endif /* _PCIE_DESIGNWARE_H */
diff --git a/drivers/pci/controller/dwc/pcie-dw-rockchip.c b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
index c1e7653e508e..f8605fe61a41 100644
--- a/drivers/pci/controller/dwc/pcie-dw-rockchip.c
+++ b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
@@ -8,71 +8,108 @@
* Author: Simon Xue <xxm@rock-chips.com>
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/gpio/consumer.h>
+#include <linux/hw_bitfield.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reset.h>
+#include "../../pci.h"
#include "pcie-designware.h"
/*
* The upper 16 bits of PCIE_CLIENT_CONFIG are a write
* mask for the lower 16 bits.
*/
-#define HIWORD_UPDATE(mask, val) (((mask) << 16) | (val))
-#define HIWORD_UPDATE_BIT(val) HIWORD_UPDATE(val, val)
-#define HIWORD_DISABLE_BIT(val) HIWORD_UPDATE(val, ~val)
#define to_rockchip_pcie(x) dev_get_drvdata((x)->dev)
-#define PCIE_CLIENT_RC_MODE HIWORD_UPDATE_BIT(0x40)
-#define PCIE_CLIENT_ENABLE_LTSSM HIWORD_UPDATE_BIT(0xc)
-#define PCIE_SMLH_LINKUP BIT(16)
-#define PCIE_RDLH_LINKUP BIT(17)
-#define PCIE_LINKUP (PCIE_SMLH_LINKUP | PCIE_RDLH_LINKUP)
-#define PCIE_L0S_ENTRY 0x11
-#define PCIE_CLIENT_GENERAL_CONTROL 0x0
+/* General Control Register */
+#define PCIE_CLIENT_GENERAL_CON 0x0
+#define PCIE_CLIENT_MODE_MASK GENMASK(7, 4)
+#define PCIE_CLIENT_MODE_EP 0x0UL
+#define PCIE_CLIENT_MODE_RC 0x4UL
+#define PCIE_CLIENT_SET_MODE(x) FIELD_PREP_WM16(PCIE_CLIENT_MODE_MASK, (x))
+#define PCIE_CLIENT_LD_RQ_RST_GRT FIELD_PREP_WM16(BIT(3), 1)
+#define PCIE_CLIENT_ENABLE_LTSSM FIELD_PREP_WM16(BIT(2), 1)
+#define PCIE_CLIENT_DISABLE_LTSSM FIELD_PREP_WM16(BIT(2), 0)
+
+/* Interrupt Status Register Related to Legacy Interrupt */
#define PCIE_CLIENT_INTR_STATUS_LEGACY 0x8
+
+/* Interrupt Status Register Related to Miscellaneous Operation */
+#define PCIE_CLIENT_INTR_STATUS_MISC 0x10
+#define PCIE_RDLH_LINK_UP_CHGED BIT(1)
+#define PCIE_LINK_REQ_RST_NOT_INT BIT(2)
+
+/* Interrupt Mask Register Related to Legacy Interrupt */
#define PCIE_CLIENT_INTR_MASK_LEGACY 0x1c
-#define PCIE_CLIENT_GENERAL_DEBUG 0x104
+#define PCIE_INTR_MASK GENMASK(7, 0)
+#define PCIE_INTR_CLAMP(_x) ((BIT((_x)) & PCIE_INTR_MASK))
+#define PCIE_INTR_LEGACY_MASK(x) (PCIE_INTR_CLAMP((x)) | \
+ (PCIE_INTR_CLAMP((x)) << 16))
+#define PCIE_INTR_LEGACY_UNMASK(x) (PCIE_INTR_CLAMP((x)) << 16)
+
+/* Interrupt Mask Register Related to Miscellaneous Operation */
+#define PCIE_CLIENT_INTR_MASK_MISC 0x24
+
+/* Power Management Control Register */
+#define PCIE_CLIENT_POWER_CON 0x2c
+#define PCIE_CLKREQ_READY FIELD_PREP_WM16(BIT(0), 1)
+#define PCIE_CLKREQ_NOT_READY FIELD_PREP_WM16(BIT(0), 0)
+#define PCIE_CLKREQ_PULL_DOWN FIELD_PREP_WM16(GENMASK(13, 12), 1)
+
+/* Hot Reset Control Register */
#define PCIE_CLIENT_HOT_RESET_CTRL 0x180
+#define PCIE_LTSSM_APP_DLY2_EN BIT(1)
+#define PCIE_LTSSM_APP_DLY2_DONE BIT(3)
+#define PCIE_LTSSM_ENABLE_ENHANCE BIT(4)
+
+/* LTSSM Status Register */
#define PCIE_CLIENT_LTSSM_STATUS 0x300
-#define PCIE_LTSSM_ENABLE_ENHANCE BIT(4)
-#define PCIE_LTSSM_STATUS_MASK GENMASK(5, 0)
+#define PCIE_LINKUP 0x3
+#define PCIE_LINKUP_MASK GENMASK(17, 16)
+#define PCIE_LTSSM_STATUS_MASK GENMASK(5, 0)
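
The FIELD_PREP_WM16() uses above target "hiword-mask" registers, where the upper 16 bits of a write select which of the lower 16 bits take effect; a plain-C illustration of the presumed encoding:

/* Presumed encoding: value in bits [15:0], write-enable mask in [31:16] */
static inline u32 hiword_update(u32 mask, u32 val)
{
	return (mask << 16) | (val & mask);
}

/* e.g. enabling the LTSSM: hiword_update(BIT(2), BIT(2)) == 0x00040004 */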
struct rockchip_pcie {
- struct dw_pcie pci;
- void __iomem *apb_base;
- struct phy *phy;
- struct clk_bulk_data *clks;
- unsigned int clk_cnt;
- struct reset_control *rst;
- struct gpio_desc *rst_gpio;
- struct regulator *vpcie3v3;
- struct irq_domain *irq_domain;
+ struct dw_pcie pci;
+ void __iomem *apb_base;
+ struct phy *phy;
+ struct clk_bulk_data *clks;
+ unsigned int clk_cnt;
+ struct reset_control *rst;
+ struct gpio_desc *rst_gpio;
+ struct irq_domain *irq_domain;
+ const struct rockchip_pcie_of_data *data;
+ bool supports_clkreq;
};
-static int rockchip_pcie_readl_apb(struct rockchip_pcie *rockchip,
- u32 reg)
+struct rockchip_pcie_of_data {
+ enum dw_pcie_device_mode mode;
+ const struct pci_epc_features *epc_features;
+};
+
+static int rockchip_pcie_readl_apb(struct rockchip_pcie *rockchip, u32 reg)
{
return readl_relaxed(rockchip->apb_base + reg);
}
-static void rockchip_pcie_writel_apb(struct rockchip_pcie *rockchip,
- u32 val, u32 reg)
+static void rockchip_pcie_writel_apb(struct rockchip_pcie *rockchip, u32 val,
+ u32 reg)
{
writel_relaxed(val, rockchip->apb_base + reg);
}
-static void rockchip_pcie_legacy_int_handler(struct irq_desc *desc)
+static void rockchip_pcie_intx_handler(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
struct rockchip_pcie *rockchip = irq_desc_get_handler_data(desc);
@@ -91,14 +128,14 @@ static void rockchip_pcie_legacy_int_handler(struct irq_desc *desc)
static void rockchip_intx_mask(struct irq_data *data)
{
rockchip_pcie_writel_apb(irq_data_get_irq_chip_data(data),
- HIWORD_UPDATE_BIT(BIT(data->hwirq)),
+ PCIE_INTR_LEGACY_MASK(data->hwirq),
PCIE_CLIENT_INTR_MASK_LEGACY);
};
static void rockchip_intx_unmask(struct irq_data *data)
{
rockchip_pcie_writel_apb(irq_data_get_irq_chip_data(data),
- HIWORD_DISABLE_BIT(BIT(data->hwirq)),
+ PCIE_INTR_LEGACY_UNMASK(data->hwirq),
PCIE_CLIENT_INTR_MASK_LEGACY);
};
@@ -133,8 +170,8 @@ static int rockchip_pcie_init_irq_domain(struct rockchip_pcie *rockchip)
return -EINVAL;
}
- rockchip->irq_domain = irq_domain_add_linear(intc, PCI_NUM_INTX,
- &intx_domain_ops, rockchip);
+ rockchip->irq_domain = irq_domain_create_linear(of_fwnode_handle(intc), PCI_NUM_INTX,
+ &intx_domain_ops, rockchip);
of_node_put(intc);
if (!rockchip->irq_domain) {
dev_err(dev, "failed to get a INTx IRQ domain\n");
@@ -144,22 +181,73 @@ static int rockchip_pcie_init_irq_domain(struct rockchip_pcie *rockchip)
return 0;
}
+static u32 rockchip_pcie_get_ltssm(struct rockchip_pcie *rockchip)
+{
+ return rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_LTSSM_STATUS);
+}
+
static void rockchip_pcie_enable_ltssm(struct rockchip_pcie *rockchip)
{
rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_ENABLE_LTSSM,
- PCIE_CLIENT_GENERAL_CONTROL);
+ PCIE_CLIENT_GENERAL_CON);
+}
+
+static void rockchip_pcie_disable_ltssm(struct rockchip_pcie *rockchip)
+{
+ rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_DISABLE_LTSSM,
+ PCIE_CLIENT_GENERAL_CON);
}
-static int rockchip_pcie_link_up(struct dw_pcie *pci)
+static bool rockchip_pcie_link_up(struct dw_pcie *pci)
{
struct rockchip_pcie *rockchip = to_rockchip_pcie(pci);
- u32 val = rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_LTSSM_STATUS);
+ u32 val = rockchip_pcie_get_ltssm(rockchip);
- if ((val & PCIE_LINKUP) == PCIE_LINKUP &&
- (val & PCIE_LTSSM_STATUS_MASK) == PCIE_L0S_ENTRY)
- return 1;
+ return FIELD_GET(PCIE_LINKUP_MASK, val) == PCIE_LINKUP;
+}
- return 0;
+/*
+ * See e.g. section '11.6.6.4 L1 Substate' in the RK3588 TRM V1.0 for the steps
+ * needed to support L1 substates. Currently, just enable L1 substates for RC
+ * mode if CLKREQ# is properly connected and supports-clkreq is present in DT.
+ * For EP mode, more work is needed to actually save power in L1
+ * substates, so keep them disabled until there is proper support.
+ */
+static void rockchip_pcie_configure_l1ss(struct dw_pcie *pci)
+{
+ struct rockchip_pcie *rockchip = to_rockchip_pcie(pci);
+
+ /* Enable L1 substates if CLKREQ# is properly connected */
+ if (rockchip->supports_clkreq) {
+ rockchip_pcie_writel_apb(rockchip, PCIE_CLKREQ_READY,
+ PCIE_CLIENT_POWER_CON);
+ pci->l1ss_support = true;
+ return;
+ }
+
+ /*
+ * Otherwise, assert CLKREQ# unconditionally. Since
+ * pci->l1ss_support is not set, the DWC core will prevent L1
+ * Substates support from being advertised.
+ */
+ rockchip_pcie_writel_apb(rockchip,
+ PCIE_CLKREQ_PULL_DOWN | PCIE_CLKREQ_NOT_READY,
+ PCIE_CLIENT_POWER_CON);
+}
+
+static void rockchip_pcie_enable_l0s(struct dw_pcie *pci)
+{
+ u32 cap, lnkcap;
+
+ /* Enable L0S capability for all SoCs */
+ cap = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ if (cap) {
+ lnkcap = dw_pcie_readl_dbi(pci, cap + PCI_EXP_LNKCAP);
+ lnkcap |= PCI_EXP_LNKCAP_ASPM_L0S;
+ dw_pcie_dbi_ro_wr_en(pci);
+ dw_pcie_writel_dbi(pci, cap + PCI_EXP_LNKCAP, lnkcap);
+ dw_pcie_dbi_ro_wr_dis(pci);
+ }
}
static int rockchip_pcie_start_link(struct dw_pcie *pci)
@@ -180,18 +268,24 @@ static int rockchip_pcie_start_link(struct dw_pcie *pci)
* We need more extra time as before, rather than setting just
* 100us as we don't know how long should the device need to reset.
*/
- msleep(100);
+ msleep(PCIE_T_PVPERL_MS);
gpiod_set_value_cansleep(rockchip->rst_gpio, 1);
return 0;
}
+static void rockchip_pcie_stop_link(struct dw_pcie *pci)
+{
+ struct rockchip_pcie *rockchip = to_rockchip_pcie(pci);
+
+ rockchip_pcie_disable_ltssm(rockchip);
+}
+
static int rockchip_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct rockchip_pcie *rockchip = to_rockchip_pcie(pci);
struct device *dev = rockchip->pci.dev;
- u32 val = HIWORD_UPDATE_BIT(PCIE_LTSSM_ENABLE_ENHANCE);
int irq, ret;
irq = of_irq_get_byname(dev->of_node, "legacy");
@@ -202,20 +296,119 @@ static int rockchip_pcie_host_init(struct dw_pcie_rp *pp)
if (ret < 0)
dev_err(dev, "failed to init irq domain\n");
- irq_set_chained_handler_and_data(irq, rockchip_pcie_legacy_int_handler,
+ irq_set_chained_handler_and_data(irq, rockchip_pcie_intx_handler,
rockchip);
- /* LTSSM enable control mode */
- rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_HOT_RESET_CTRL);
-
- rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_RC_MODE,
- PCIE_CLIENT_GENERAL_CONTROL);
+ rockchip_pcie_configure_l1ss(pci);
+ rockchip_pcie_enable_l0s(pci);
return 0;
}
static const struct dw_pcie_host_ops rockchip_pcie_host_ops = {
- .host_init = rockchip_pcie_host_init,
+ .init = rockchip_pcie_host_init,
+};
+
+/*
+ * ATS does not work on RK3588 when running in EP mode.
+ *
+ * After the host has enabled ATS on the EP side, it will send an IOTLB
+ * invalidation request to the EP side. However, the RK3588 will never send
+ * a completion back and eventually the host will print an IOTLB_INV_TIMEOUT
+ * error, and the EP will not be operational. If we hide the ATS capability,
+ * things work as expected.
+ */
+static void rockchip_pcie_ep_hide_broken_ats_cap_rk3588(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct device *dev = pci->dev;
+
+ /* Only hide the ATS capability for RK3588 running in EP mode. */
+ if (!of_device_is_compatible(dev->of_node, "rockchip,rk3588-pcie-ep"))
+ return;
+
+ if (dw_pcie_ep_hide_ext_capability(pci, PCI_EXT_CAP_ID_SECPCI,
+ PCI_EXT_CAP_ID_ATS))
+ dev_err(dev, "failed to hide ATS capability\n");
+}
+
+static void rockchip_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ enum pci_barno bar;
+
+ rockchip_pcie_enable_l0s(pci);
+ rockchip_pcie_ep_hide_broken_ats_cap_rk3588(ep);
+
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
+ dw_pcie_ep_reset_bar(pci, bar);
+};
+
+static int rockchip_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+ unsigned int type, u16 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ switch (type) {
+ case PCI_IRQ_INTX:
+ return dw_pcie_ep_raise_intx_irq(ep, func_no);
+ case PCI_IRQ_MSI:
+ return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
+ case PCI_IRQ_MSIX:
+ return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
+ default:
+ dev_err(pci->dev, "UNKNOWN IRQ type\n");
+ }
+
+ return 0;
+}
+
+static const struct pci_epc_features rockchip_pcie_epc_features_rk3568 = {
+ .linkup_notifier = true,
+ .msi_capable = true,
+ .msix_capable = true,
+ .align = SZ_64K,
+ .bar[BAR_0] = { .type = BAR_RESIZABLE, },
+ .bar[BAR_1] = { .type = BAR_RESIZABLE, },
+ .bar[BAR_2] = { .type = BAR_RESIZABLE, },
+ .bar[BAR_3] = { .type = BAR_RESIZABLE, },
+ .bar[BAR_4] = { .type = BAR_RESIZABLE, },
+ .bar[BAR_5] = { .type = BAR_RESIZABLE, },
+};
+
+/*
+ * BAR4 on rk3588 exposes the ATU Port Logic Structure to the host regardless of
+ * iATU settings for BAR4. This means that BAR4 cannot be used by an EPF driver,
+ * so mark it as RESERVED. (rockchip_pcie_ep_init() will disable all BARs by
+ * default.) If the host could write to BAR4, the iATU settings (for all other
+ * BARs) would be overwritten, resulting in (all other BARs) no longer working.
+ */
+static const struct pci_epc_features rockchip_pcie_epc_features_rk3588 = {
+ .linkup_notifier = true,
+ .msi_capable = true,
+ .msix_capable = true,
+ .align = SZ_64K,
+ .bar[BAR_0] = { .type = BAR_RESIZABLE, },
+ .bar[BAR_1] = { .type = BAR_RESIZABLE, },
+ .bar[BAR_2] = { .type = BAR_RESIZABLE, },
+ .bar[BAR_3] = { .type = BAR_RESIZABLE, },
+ .bar[BAR_4] = { .type = BAR_RESERVED, },
+ .bar[BAR_5] = { .type = BAR_RESIZABLE, },
+};
+
+static const struct pci_epc_features *
+rockchip_pcie_get_features(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct rockchip_pcie *rockchip = to_rockchip_pcie(pci);
+
+ return rockchip->data->epc_features;
+}
+
+static const struct dw_pcie_ep_ops rockchip_pcie_ep_ops = {
+ .init = rockchip_pcie_ep_init,
+ .raise_irq = rockchip_pcie_raise_irq,
+ .get_features = rockchip_pcie_get_features,
};
static int rockchip_pcie_clk_init(struct rockchip_pcie *rockchip)
@@ -225,11 +418,15 @@ static int rockchip_pcie_clk_init(struct rockchip_pcie *rockchip)
ret = devm_clk_bulk_get_all(dev, &rockchip->clks);
if (ret < 0)
- return ret;
+ return dev_err_probe(dev, ret, "failed to get clocks\n");
rockchip->clk_cnt = ret;
- return clk_bulk_prepare_enable(rockchip->clk_cnt, rockchip->clks);
+ ret = clk_bulk_prepare_enable(rockchip->clk_cnt, rockchip->clks);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to enable clocks\n");
+
+ return 0;
}
static int rockchip_pcie_resource_get(struct platform_device *pdev,
@@ -237,18 +434,23 @@ static int rockchip_pcie_resource_get(struct platform_device *pdev,
{
rockchip->apb_base = devm_platform_ioremap_resource_byname(pdev, "apb");
if (IS_ERR(rockchip->apb_base))
- return PTR_ERR(rockchip->apb_base);
+ return dev_err_probe(&pdev->dev, PTR_ERR(rockchip->apb_base),
+ "failed to map apb registers\n");
rockchip->rst_gpio = devm_gpiod_get_optional(&pdev->dev, "reset",
- GPIOD_OUT_HIGH);
+ GPIOD_OUT_LOW);
if (IS_ERR(rockchip->rst_gpio))
- return PTR_ERR(rockchip->rst_gpio);
+ return dev_err_probe(&pdev->dev, PTR_ERR(rockchip->rst_gpio),
+ "failed to get reset gpio\n");
rockchip->rst = devm_reset_control_array_get_exclusive(&pdev->dev);
if (IS_ERR(rockchip->rst))
return dev_err_probe(&pdev->dev, PTR_ERR(rockchip->rst),
"failed to get reset lines\n");
+ rockchip->supports_clkreq = of_property_read_bool(pdev->dev.of_node,
+ "supports-clkreq");
+
return 0;
}
@@ -275,22 +477,197 @@ static int rockchip_pcie_phy_init(struct rockchip_pcie *rockchip)
static void rockchip_pcie_phy_deinit(struct rockchip_pcie *rockchip)
{
- phy_exit(rockchip->phy);
phy_power_off(rockchip->phy);
+ phy_exit(rockchip->phy);
}
static const struct dw_pcie_ops dw_pcie_ops = {
.link_up = rockchip_pcie_link_up,
.start_link = rockchip_pcie_start_link,
+ .stop_link = rockchip_pcie_stop_link,
};
+static irqreturn_t rockchip_pcie_rc_sys_irq_thread(int irq, void *arg)
+{
+ struct rockchip_pcie *rockchip = arg;
+ struct dw_pcie *pci = &rockchip->pci;
+ struct dw_pcie_rp *pp = &pci->pp;
+ struct device *dev = pci->dev;
+ u32 reg;
+
+ reg = rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_INTR_STATUS_MISC);
+ rockchip_pcie_writel_apb(rockchip, reg, PCIE_CLIENT_INTR_STATUS_MISC);
+
+ dev_dbg(dev, "PCIE_CLIENT_INTR_STATUS_MISC: %#x\n", reg);
+ dev_dbg(dev, "LTSSM_STATUS: %#x\n", rockchip_pcie_get_ltssm(rockchip));
+
+ if (reg & PCIE_RDLH_LINK_UP_CHGED) {
+ if (rockchip_pcie_link_up(pci)) {
+ msleep(PCIE_RESET_CONFIG_WAIT_MS);
+ dev_dbg(dev, "Received Link up event. Starting enumeration!\n");
+ /* Rescan the bus to enumerate endpoint devices */
+ pci_lock_rescan_remove();
+ pci_rescan_bus(pp->bridge->bus);
+ pci_unlock_rescan_remove();
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rockchip_pcie_ep_sys_irq_thread(int irq, void *arg)
+{
+ struct rockchip_pcie *rockchip = arg;
+ struct dw_pcie *pci = &rockchip->pci;
+ struct device *dev = pci->dev;
+ u32 reg, val;
+
+ reg = rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_INTR_STATUS_MISC);
+ rockchip_pcie_writel_apb(rockchip, reg, PCIE_CLIENT_INTR_STATUS_MISC);
+
+ dev_dbg(dev, "PCIE_CLIENT_INTR_STATUS_MISC: %#x\n", reg);
+ dev_dbg(dev, "LTSSM_STATUS: %#x\n", rockchip_pcie_get_ltssm(rockchip));
+
+ if (reg & PCIE_LINK_REQ_RST_NOT_INT) {
+ dev_dbg(dev, "hot reset or link-down reset\n");
+ dw_pcie_ep_linkdown(&pci->ep);
+ /* Stop delaying link training. */
+ val = FIELD_PREP_WM16(PCIE_LTSSM_APP_DLY2_DONE, 1);
+ rockchip_pcie_writel_apb(rockchip, val,
+ PCIE_CLIENT_HOT_RESET_CTRL);
+ }
+
+ if (reg & PCIE_RDLH_LINK_UP_CHGED) {
+ if (rockchip_pcie_link_up(pci)) {
+ dev_dbg(dev, "link up\n");
+ dw_pcie_ep_linkup(&pci->ep);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int rockchip_pcie_configure_rc(struct platform_device *pdev,
+ struct rockchip_pcie *rockchip)
+{
+ struct device *dev = &pdev->dev;
+ struct dw_pcie_rp *pp;
+ int irq, ret;
+ u32 val;
+
+ if (!IS_ENABLED(CONFIG_PCIE_ROCKCHIP_DW_HOST))
+ return -ENODEV;
+
+ irq = platform_get_irq_byname(pdev, "sys");
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_threaded_irq(dev, irq, NULL,
+ rockchip_pcie_rc_sys_irq_thread,
+ IRQF_ONESHOT, "pcie-sys-rc", rockchip);
+ if (ret) {
+ dev_err(dev, "failed to request PCIe sys IRQ\n");
+ return ret;
+ }
+
+ /* LTSSM enable control mode */
+ val = FIELD_PREP_WM16(PCIE_LTSSM_ENABLE_ENHANCE, 1);
+ rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_HOT_RESET_CTRL);
+
+ rockchip_pcie_writel_apb(rockchip,
+ PCIE_CLIENT_SET_MODE(PCIE_CLIENT_MODE_RC),
+ PCIE_CLIENT_GENERAL_CON);
+
+ pp = &rockchip->pci.pp;
+ pp->ops = &rockchip_pcie_host_ops;
+ pp->use_linkup_irq = true;
+
+ ret = dw_pcie_host_init(pp);
+ if (ret) {
+ dev_err(dev, "failed to initialize host\n");
+ return ret;
+ }
+
+ /* unmask DLL up/down indicator */
+ val = FIELD_PREP_WM16(PCIE_RDLH_LINK_UP_CHGED, 0);
+ rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_INTR_MASK_MISC);
+
+ return ret;
+}
+
+static int rockchip_pcie_configure_ep(struct platform_device *pdev,
+ struct rockchip_pcie *rockchip)
+{
+ struct device *dev = &pdev->dev;
+ int irq, ret;
+ u32 val;
+
+ if (!IS_ENABLED(CONFIG_PCIE_ROCKCHIP_DW_EP))
+ return -ENODEV;
+
+ irq = platform_get_irq_byname(pdev, "sys");
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_threaded_irq(dev, irq, NULL,
+ rockchip_pcie_ep_sys_irq_thread,
+ IRQF_ONESHOT, "pcie-sys-ep", rockchip);
+ if (ret) {
+ dev_err(dev, "failed to request PCIe sys IRQ\n");
+ return ret;
+ }
+
+ /*
+ * LTSSM enable control mode, and automatically delay link training on
+ * hot reset/link-down reset.
+ */
+ val = FIELD_PREP_WM16(PCIE_LTSSM_ENABLE_ENHANCE, 1) |
+ FIELD_PREP_WM16(PCIE_LTSSM_APP_DLY2_EN, 1);
+ rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_HOT_RESET_CTRL);
+
+ rockchip_pcie_writel_apb(rockchip,
+ PCIE_CLIENT_SET_MODE(PCIE_CLIENT_MODE_EP),
+ PCIE_CLIENT_GENERAL_CON);
+
+ rockchip->pci.ep.ops = &rockchip_pcie_ep_ops;
+ rockchip->pci.ep.page_size = SZ_64K;
+
+ dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+
+ ret = dw_pcie_ep_init(&rockchip->pci.ep);
+ if (ret) {
+ dev_err(dev, "failed to initialize endpoint\n");
+ return ret;
+ }
+
+ ret = dw_pcie_ep_init_registers(&rockchip->pci.ep);
+ if (ret) {
+ dev_err(dev, "failed to initialize DWC endpoint registers\n");
+ dw_pcie_ep_deinit(&rockchip->pci.ep);
+ return ret;
+ }
+
+ pci_epc_init_notify(rockchip->pci.ep.epc);
+
+ /* unmask DLL up/down indicator and hot reset/link-down reset */
+ val = FIELD_PREP_WM16(PCIE_RDLH_LINK_UP_CHGED, 0) |
+ FIELD_PREP_WM16(PCIE_LINK_REQ_RST_NOT_INT, 0);
+ rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_INTR_MASK_MISC);
+
+ return ret;
+}
+
static int rockchip_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct rockchip_pcie *rockchip;
- struct dw_pcie_rp *pp;
+ const struct rockchip_pcie_of_data *data;
int ret;
+ data = of_device_get_match_data(dev);
+ if (!data)
+ return -EINVAL;
+
rockchip = devm_kzalloc(dev, sizeof(*rockchip), GFP_KERNEL);
if (!rockchip)
return -ENOMEM;
@@ -299,9 +676,11 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
rockchip->pci.dev = dev;
rockchip->pci.ops = &dw_pcie_ops;
+ rockchip->data = data;
- pp = &rockchip->pci.pp;
- pp->ops = &rockchip_pcie_host_ops;
+ /* Default N_FTS value (210) is broken, override it to 255 */
+ rockchip->pci.n_fts[0] = 255; /* Gen1 */
+ rockchip->pci.n_fts[1] = 255; /* Gen2+ */
ret = rockchip_pcie_resource_get(pdev, rockchip);
if (ret)
@@ -312,23 +691,15 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
return ret;
/* DON'T MOVE ME: must be enable before PHY init */
- rockchip->vpcie3v3 = devm_regulator_get_optional(dev, "vpcie3v3");
- if (IS_ERR(rockchip->vpcie3v3)) {
- if (PTR_ERR(rockchip->vpcie3v3) != -ENODEV)
- return dev_err_probe(dev, PTR_ERR(rockchip->vpcie3v3),
- "failed to get vpcie3v3 regulator\n");
- rockchip->vpcie3v3 = NULL;
- } else {
- ret = regulator_enable(rockchip->vpcie3v3);
- if (ret) {
- dev_err(dev, "failed to enable vpcie3v3 regulator\n");
- return ret;
- }
- }
+ ret = devm_regulator_get_enable_optional(dev, "vpcie3v3");
+ if (ret < 0 && ret != -ENODEV)
+ return dev_err_probe(dev, ret,
+ "failed to enable vpcie3v3 regulator\n");
ret = rockchip_pcie_phy_init(rockchip);
if (ret)
- goto disable_regulator;
+ return dev_err_probe(dev, ret,
+ "failed to initialize the phy\n");
ret = reset_control_deassert(rockchip->rst);
if (ret)
@@ -338,22 +709,60 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
if (ret)
goto deinit_phy;
- ret = dw_pcie_host_init(pp);
- if (!ret)
- return 0;
+ switch (data->mode) {
+ case DW_PCIE_RC_TYPE:
+ ret = rockchip_pcie_configure_rc(pdev, rockchip);
+ if (ret)
+ goto deinit_clk;
+ break;
+ case DW_PCIE_EP_TYPE:
+ ret = rockchip_pcie_configure_ep(pdev, rockchip);
+ if (ret)
+ goto deinit_clk;
+ break;
+ default:
+ dev_err(dev, "INVALID device type %d\n", data->mode);
+ ret = -EINVAL;
+ goto deinit_clk;
+ }
+
+ return 0;
+deinit_clk:
clk_bulk_disable_unprepare(rockchip->clk_cnt, rockchip->clks);
deinit_phy:
rockchip_pcie_phy_deinit(rockchip);
-disable_regulator:
- if (rockchip->vpcie3v3)
- regulator_disable(rockchip->vpcie3v3);
return ret;
}
+static const struct rockchip_pcie_of_data rockchip_pcie_rc_of_data_rk3568 = {
+ .mode = DW_PCIE_RC_TYPE,
+};
+
+static const struct rockchip_pcie_of_data rockchip_pcie_ep_of_data_rk3568 = {
+ .mode = DW_PCIE_EP_TYPE,
+ .epc_features = &rockchip_pcie_epc_features_rk3568,
+};
+
+static const struct rockchip_pcie_of_data rockchip_pcie_ep_of_data_rk3588 = {
+ .mode = DW_PCIE_EP_TYPE,
+ .epc_features = &rockchip_pcie_epc_features_rk3588,
+};
+
static const struct of_device_id rockchip_pcie_of_match[] = {
- { .compatible = "rockchip,rk3568-pcie", },
+ {
+ .compatible = "rockchip,rk3568-pcie",
+ .data = &rockchip_pcie_rc_of_data_rk3568,
+ },
+ {
+ .compatible = "rockchip,rk3568-pcie-ep",
+ .data = &rockchip_pcie_ep_of_data_rk3568,
+ },
+ {
+ .compatible = "rockchip,rk3588-pcie-ep",
+ .data = &rockchip_pcie_ep_of_data_rk3588,
+ },
{},
};
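
The three-step endpoint bring-up in rockchip_pcie_configure_ep() above recurs in the Keem Bay and Qualcomm EP conversions further down; as a standalone sketch (helper roles inferred from this diff, demo_ep_setup() is a hypothetical name, not part of the patch):

static int demo_ep_setup(struct dw_pcie_ep *ep)
{
	int ret;

	/* Allocate the EPC and parse the endpoint resources */
	ret = dw_pcie_ep_init(ep);
	if (ret)
		return ret;

	/* Program the DBI registers once clocks and PHY are running */
	ret = dw_pcie_ep_init_registers(ep);
	if (ret) {
		dw_pcie_ep_deinit(ep);
		return ret;
	}

	/* Tell EPF drivers the controller is ready to be configured */
	pci_epc_init_notify(ep->epc);

	return 0;
}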
diff --git a/drivers/pci/controller/dwc/pcie-fu740.c b/drivers/pci/controller/dwc/pcie-fu740.c
index 0c90583c078b..66367252032b 100644
--- a/drivers/pci/controller/dwc/pcie-fu740.c
+++ b/drivers/pci/controller/dwc/pcie-fu740.c
@@ -279,7 +279,7 @@ static int fu740_pcie_host_init(struct dw_pcie_rp *pp)
}
static const struct dw_pcie_host_ops fu740_pcie_host_ops = {
- .host_init = fu740_pcie_host_init,
+ .init = fu740_pcie_host_init,
};
static const struct dw_pcie_ops dw_pcie_ops = {
@@ -299,6 +299,7 @@ static int fu740_pcie_probe(struct platform_device *pdev)
pci->dev = dev;
pci->ops = &dw_pcie_ops;
pci->pp.ops = &fu740_pcie_host_ops;
+ pci->pp.num_vectors = MAX_MSI_IRQS;
/* SiFive specific region: mgmt */
afp->mgmt_base = devm_platform_ioremap_resource_byname(pdev, "mgmt");
diff --git a/drivers/pci/controller/dwc/pcie-hisi.c b/drivers/pci/controller/dwc/pcie-hisi.c
index 8904b5b85ee5..3c17897e56fc 100644
--- a/drivers/pci/controller/dwc/pcie-hisi.c
+++ b/drivers/pci/controller/dwc/pcie-hisi.c
@@ -15,6 +15,7 @@
#include <linux/pci-acpi.h>
#include <linux/pci-ecam.h>
#include "../../pci.h"
+#include "../pci-host-common.h"
#if defined(CONFIG_PCI_HISI) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS))
diff --git a/drivers/pci/controller/dwc/pcie-histb.c b/drivers/pci/controller/dwc/pcie-histb.c
index 43c27812dd6d..a52071589377 100644
--- a/drivers/pci/controller/dwc/pcie-histb.c
+++ b/drivers/pci/controller/dwc/pcie-histb.c
@@ -151,7 +151,7 @@ static struct pci_ops histb_pci_ops = {
.write = histb_pcie_wr_own_conf,
};
-static int histb_pcie_link_up(struct dw_pcie *pci)
+static bool histb_pcie_link_up(struct dw_pcie *pci)
{
struct histb_pcie *hipcie = to_histb_pcie(pci);
u32 regval;
@@ -160,11 +160,8 @@ static int histb_pcie_link_up(struct dw_pcie *pci)
regval = histb_pcie_readl(hipcie, PCIE_SYS_STAT0);
status = histb_pcie_readl(hipcie, PCIE_SYS_STAT4);
status &= PCIE_LTSSM_STATE_MASK;
- if ((regval & PCIE_XMLH_LINK_UP) && (regval & PCIE_RDLH_LINK_UP) &&
- (status == PCIE_LTSSM_STATE_ACTIVE))
- return 1;
-
- return 0;
+ return ((regval & PCIE_XMLH_LINK_UP) && (regval & PCIE_RDLH_LINK_UP) &&
+ (status == PCIE_LTSSM_STATE_ACTIVE));
}
static int histb_pcie_start_link(struct dw_pcie *pci)
@@ -198,7 +195,7 @@ static int histb_pcie_host_init(struct dw_pcie_rp *pp)
}
static const struct dw_pcie_host_ops histb_pcie_host_ops = {
- .host_init = histb_pcie_host_init,
+ .init = histb_pcie_host_init,
};
static void histb_pcie_host_disable(struct histb_pcie *hipcie)
@@ -409,28 +406,30 @@ static int histb_pcie_probe(struct platform_device *pdev)
ret = histb_pcie_host_enable(pp);
if (ret) {
dev_err(dev, "failed to enable host\n");
- return ret;
+ goto err_exit_phy;
}
ret = dw_pcie_host_init(pp);
if (ret) {
dev_err(dev, "failed to initialize host\n");
- return ret;
+ goto err_exit_phy;
}
return 0;
+
+err_exit_phy:
+ phy_exit(hipcie->phy);
+
+ return ret;
}
-static int histb_pcie_remove(struct platform_device *pdev)
+static void histb_pcie_remove(struct platform_device *pdev)
{
struct histb_pcie *hipcie = platform_get_drvdata(pdev);
histb_pcie_host_disable(hipcie);
- if (hipcie->phy)
- phy_exit(hipcie->phy);
-
- return 0;
+ phy_exit(hipcie->phy);
}
static const struct of_device_id histb_pcie_of_match[] = {
@@ -441,7 +440,7 @@ MODULE_DEVICE_TABLE(of, histb_pcie_of_match);
static struct platform_driver histb_pcie_platform_driver = {
.probe = histb_pcie_probe,
- .remove = histb_pcie_remove,
+ .remove = histb_pcie_remove,
.driver = {
.name = "histb-pcie",
.of_match_table = histb_pcie_of_match,
@@ -450,4 +449,3 @@ static struct platform_driver histb_pcie_platform_driver = {
module_platform_driver(histb_pcie_platform_driver);
MODULE_DESCRIPTION("HiSilicon STB PCIe host controller driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/dwc/pcie-intel-gw.c b/drivers/pci/controller/dwc/pcie-intel-gw.c
index 333c33d98a70..c21906eced61 100644
--- a/drivers/pci/controller/dwc/pcie-intel-gw.c
+++ b/drivers/pci/controller/dwc/pcie-intel-gw.c
@@ -9,9 +9,11 @@
#include <linux/clk.h>
#include <linux/gpio/consumer.h>
#include <linux/iopoll.h>
+#include <linux/mod_devicetable.h>
#include <linux/pci_regs.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/reset.h>
#include "../../pci.h"
@@ -55,7 +57,6 @@
PCIE_APP_IRN_INTA | PCIE_APP_IRN_INTB | \
PCIE_APP_IRN_INTC | PCIE_APP_IRN_INTD)
-#define BUS_IATU_OFFSET SZ_256M
#define RESET_INTERVAL_MS 100
struct intel_pcie {
@@ -130,7 +131,7 @@ static void intel_pcie_link_setup(struct intel_pcie *pcie)
static void intel_pcie_init_n_fts(struct dw_pcie *pci)
{
- switch (pci->link_gen) {
+ switch (pci->max_link_speed) {
case 3:
pci->n_fts[1] = PORT_AFR_N_FTS_GEN3;
break;
@@ -250,7 +251,7 @@ static int intel_pcie_wait_l2(struct intel_pcie *pcie)
int ret;
struct dw_pcie *pci = &pcie->pci;
- if (pci->link_gen < 3)
+ if (pci->max_link_speed < 3)
return 0;
/* Send PME_TURN_OFF message */
@@ -340,15 +341,13 @@ static void __intel_pcie_remove(struct intel_pcie *pcie)
phy_exit(pcie->phy);
}
-static int intel_pcie_remove(struct platform_device *pdev)
+static void intel_pcie_remove(struct platform_device *pdev)
{
struct intel_pcie *pcie = platform_get_drvdata(pdev);
struct dw_pcie_rp *pp = &pcie->pci.pp;
dw_pcie_host_deinit(pp);
__intel_pcie_remove(pcie);
-
- return 0;
}
static int intel_pcie_suspend_noirq(struct device *dev)
@@ -381,17 +380,11 @@ static int intel_pcie_rc_init(struct dw_pcie_rp *pp)
return intel_pcie_host_setup(pcie);
}
-static u64 intel_pcie_cpu_addr(struct dw_pcie *pcie, u64 cpu_addr)
-{
- return cpu_addr + BUS_IATU_OFFSET;
-}
-
static const struct dw_pcie_ops intel_pcie_ops = {
- .cpu_addr_fixup = intel_pcie_cpu_addr,
};
static const struct dw_pcie_host_ops intel_pcie_dw_ops = {
- .host_init = intel_pcie_rc_init,
+ .init = intel_pcie_rc_init,
};
static int intel_pcie_probe(struct platform_device *pdev)
@@ -409,6 +402,7 @@ static int intel_pcie_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pcie);
pci = &pcie->pci;
pci->dev = dev;
+ pci->use_parent_dt_ranges = true;
pp = &pci->pp;
ret = intel_pcie_get_resources(pdev);
diff --git a/drivers/pci/controller/dwc/pcie-keembay.c b/drivers/pci/controller/dwc/pcie-keembay.c
index f90f36bac018..60e74ac782af 100644
--- a/drivers/pci/controller/dwc/pcie-keembay.c
+++ b/drivers/pci/controller/dwc/pcie-keembay.c
@@ -101,7 +101,7 @@ static void keembay_pcie_ltssm_set(struct keembay_pcie *pcie, bool enable)
writel(val, pcie->apb_base + PCIE_REGS_PCIE_APP_CNTRL);
}
-static int keembay_pcie_link_up(struct dw_pcie *pci)
+static bool keembay_pcie_link_up(struct dw_pcie *pci)
{
struct keembay_pcie *pcie = dev_get_drvdata(pci->dev);
u32 val;
@@ -148,6 +148,13 @@ static const struct dw_pcie_ops keembay_pcie_ops = {
.stop_link = keembay_pcie_stop_link,
};
+static inline void keembay_pcie_disable_clock(void *data)
+{
+ struct clk *clk = data;
+
+ clk_disable_unprepare(clk);
+}
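
Background on this wrapper: the cast it replaces (removed in the hunk just below) invoked clk_disable_unprepare() through a mismatched function type, which is undefined behavior in C and is rejected at run time by kCFI-enabled builds. With the wrapper, the registration is simply:

	ret = devm_add_action_or_reset(dev, keembay_pcie_disable_clock, clk);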
+
static inline struct clk *keembay_pcie_probe_clock(struct device *dev,
const char *id, u64 rate)
{
@@ -168,9 +175,7 @@ static inline struct clk *keembay_pcie_probe_clock(struct device *dev,
if (ret)
return ERR_PTR(ret);
- ret = devm_add_action_or_reset(dev,
- (void(*)(void *))clk_disable_unprepare,
- clk);
+ ret = devm_add_action_or_reset(dev, keembay_pcie_disable_clock, clk);
if (ret)
return ERR_PTR(ret);
@@ -284,19 +289,18 @@ static void keembay_pcie_ep_init(struct dw_pcie_ep *ep)
}
static int keembay_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
- enum pci_epc_irq_type type,
- u16 interrupt_num)
+ unsigned int type, u16 interrupt_num)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
switch (type) {
- case PCI_EPC_IRQ_LEGACY:
- /* Legacy interrupts are not supported in Keem Bay */
- dev_err(pci->dev, "Legacy IRQ is not supported\n");
+ case PCI_IRQ_INTX:
+ /* INTx interrupts are not supported in Keem Bay */
+ dev_err(pci->dev, "INTx IRQ is not supported\n");
return -EINVAL;
- case PCI_EPC_IRQ_MSI:
+ case PCI_IRQ_MSI:
return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
- case PCI_EPC_IRQ_MSIX:
+ case PCI_IRQ_MSIX:
return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
default:
dev_err(pci->dev, "Unknown IRQ type %d\n", type);
@@ -305,11 +309,14 @@ static int keembay_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
}
static const struct pci_epc_features keembay_pcie_epc_features = {
- .linkup_notifier = false,
.msi_capable = true,
.msix_capable = true,
- .reserved_bar = BIT(BAR_1) | BIT(BAR_3) | BIT(BAR_5),
- .bar_fixed_64bit = BIT(BAR_0) | BIT(BAR_2) | BIT(BAR_4),
+ .bar[BAR_0] = { .only_64bit = true, },
+ .bar[BAR_1] = { .type = BAR_RESERVED, },
+ .bar[BAR_2] = { .only_64bit = true, },
+ .bar[BAR_3] = { .type = BAR_RESERVED, },
+ .bar[BAR_4] = { .only_64bit = true, },
+ .bar[BAR_5] = { .type = BAR_RESERVED, },
.align = SZ_16K,
};
@@ -320,7 +327,7 @@ keembay_pcie_get_features(struct dw_pcie_ep *ep)
}
static const struct dw_pcie_ep_ops keembay_pcie_ep_ops = {
- .ep_init = keembay_pcie_ep_init,
+ .init = keembay_pcie_ep_init,
.raise_irq = keembay_pcie_ep_raise_irq,
.get_features = keembay_pcie_get_features,
};
@@ -388,6 +395,7 @@ static int keembay_pcie_probe(struct platform_device *pdev)
struct keembay_pcie *pcie;
struct dw_pcie *pci;
enum dw_pcie_device_mode mode;
+ int ret;
data = device_get_match_data(dev);
if (!data)
@@ -422,11 +430,26 @@ static int keembay_pcie_probe(struct platform_device *pdev)
return -ENODEV;
pci->ep.ops = &keembay_pcie_ep_ops;
- return dw_pcie_ep_init(&pci->ep);
+ ret = dw_pcie_ep_init(&pci->ep);
+ if (ret)
+ return ret;
+
+ ret = dw_pcie_ep_init_registers(&pci->ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DWC endpoint registers\n");
+ dw_pcie_ep_deinit(&pci->ep);
+ return ret;
+ }
+
+ pci_epc_init_notify(pci->ep.epc);
+
+ break;
default:
dev_err(dev, "Invalid device type %d\n", pcie->mode);
return -ENODEV;
}
+
+ return 0;
}
static const struct keembay_pcie_of_data keembay_pcie_rc_of_data = {
diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c
index d09507f822a7..91559c8b1866 100644
--- a/drivers/pci/controller/dwc/pcie-kirin.c
+++ b/drivers/pci/controller/dwc/pcie-kirin.c
@@ -12,13 +12,10 @@
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/err.h>
-#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/mfd/syscon.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
-#include <linux/of_gpio.h>
+#include <linux/of.h>
#include <linux/of_pci.h>
#include <linux/phy/phy.h>
#include <linux/pci.h>
@@ -79,16 +76,16 @@ struct kirin_pcie {
void *phy_priv; /* only for PCIE_KIRIN_INTERNAL_PHY */
/* DWC PERST# */
- int gpio_id_dwc_perst;
+ struct gpio_desc *id_dwc_perst_gpio;
/* Per-slot PERST# */
int num_slots;
- int gpio_id_reset[MAX_PCI_SLOTS];
+ struct gpio_desc *id_reset_gpio[MAX_PCI_SLOTS];
const char *reset_names[MAX_PCI_SLOTS];
/* Per-slot clkreq */
int n_gpio_clkreq;
- int gpio_id_clkreq[MAX_PCI_SLOTS];
+ struct gpio_desc *id_clkreq_gpio[MAX_PCI_SLOTS];
const char *clkreq_names[MAX_PCI_SLOTS];
};
@@ -219,10 +216,9 @@ static int hi3660_pcie_phy_start(struct hi3660_pcie_phy *phy)
usleep_range(PIPE_CLK_WAIT_MIN, PIPE_CLK_WAIT_MAX);
reg_val = kirin_apb_phy_readl(phy, PCIE_APB_PHY_STATUS0);
- if (reg_val & PIPE_CLK_STABLE) {
- dev_err(dev, "PIPE clk is not stable\n");
- return -EINVAL;
- }
+ if (reg_val & PIPE_CLK_STABLE)
+ return dev_err_probe(dev, -ETIMEDOUT,
+ "PIPE clk is not stable\n");
return 0;
}
@@ -367,7 +363,6 @@ static int kirin_pcie_get_gpio_enable(struct kirin_pcie *pcie,
struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- char name[32];
int ret, i;
/* This is an optional property */
@@ -375,24 +370,27 @@ static int kirin_pcie_get_gpio_enable(struct kirin_pcie *pcie,
if (ret < 0)
return 0;
- if (ret > MAX_PCI_SLOTS) {
- dev_err(dev, "Too many GPIO clock requests!\n");
- return -EINVAL;
- }
+ if (ret > MAX_PCI_SLOTS)
+ return dev_err_probe(dev, -EINVAL,
+ "Too many GPIO clock requests!\n");
pcie->n_gpio_clkreq = ret;
for (i = 0; i < pcie->n_gpio_clkreq; i++) {
- pcie->gpio_id_clkreq[i] = of_get_named_gpio(dev->of_node,
- "hisilicon,clken-gpios", i);
- if (pcie->gpio_id_clkreq[i] < 0)
- return pcie->gpio_id_clkreq[i];
-
- sprintf(name, "pcie_clkreq_%d", i);
- pcie->clkreq_names[i] = devm_kstrdup_const(dev, name,
- GFP_KERNEL);
+ pcie->id_clkreq_gpio[i] = devm_gpiod_get_index(dev,
+ "hisilicon,clken", i,
+ GPIOD_OUT_LOW);
+ if (IS_ERR(pcie->id_clkreq_gpio[i]))
+ return dev_err_probe(dev, PTR_ERR(pcie->id_clkreq_gpio[i]),
+ "unable to get a valid clken gpio\n");
+
+ pcie->clkreq_names[i] = devm_kasprintf(dev, GFP_KERNEL,
+ "pcie_clkreq_%d", i);
if (!pcie->clkreq_names[i])
return -ENOMEM;
+
+ gpiod_set_consumer_name(pcie->id_clkreq_gpio[i],
+ pcie->clkreq_names[i]);
}
return 0;
@@ -403,57 +401,55 @@ static int kirin_pcie_parse_port(struct kirin_pcie *pcie,
struct device_node *node)
{
struct device *dev = &pdev->dev;
- struct device_node *parent, *child;
int ret, slot, i;
- char name[32];
- for_each_available_child_of_node(node, parent) {
- for_each_available_child_of_node(parent, child) {
+ for_each_available_child_of_node_scoped(node, parent) {
+ for_each_available_child_of_node_scoped(parent, child) {
i = pcie->num_slots;
- pcie->gpio_id_reset[i] = of_get_named_gpio(child,
- "reset-gpios", 0);
- if (pcie->gpio_id_reset[i] < 0)
- continue;
+ pcie->id_reset_gpio[i] = devm_fwnode_gpiod_get_index(dev,
+ of_fwnode_handle(child),
+ "reset", 0, GPIOD_OUT_LOW,
+ NULL);
+ if (IS_ERR(pcie->id_reset_gpio[i])) {
+ if (PTR_ERR(pcie->id_reset_gpio[i]) == -ENOENT)
+ continue;
+ return dev_err_probe(dev, PTR_ERR(pcie->id_reset_gpio[i]),
+ "unable to get a valid reset gpio\n");
+ }
+
+ if (pcie->num_slots + 1 >= MAX_PCI_SLOTS)
+ return dev_err_probe(dev, -EINVAL,
+ "Too many PCI slots!\n");
pcie->num_slots++;
- if (pcie->num_slots > MAX_PCI_SLOTS) {
- dev_err(dev, "Too many PCI slots!\n");
- ret = -EINVAL;
- goto put_node;
- }
ret = of_pci_get_devfn(child);
- if (ret < 0) {
- dev_err(dev, "failed to parse devfn: %d\n", ret);
- goto put_node;
- }
+ if (ret < 0)
+ return dev_err_probe(dev, ret,
+ "failed to parse devfn\n");
slot = PCI_SLOT(ret);
- sprintf(name, "pcie_perst_%d", slot);
- pcie->reset_names[i] = devm_kstrdup_const(dev, name,
- GFP_KERNEL);
- if (!pcie->reset_names[i]) {
- ret = -ENOMEM;
- goto put_node;
- }
+ pcie->reset_names[i] = devm_kasprintf(dev, GFP_KERNEL,
+ "pcie_perst_%d",
+ slot);
+ if (!pcie->reset_names[i])
+ return -ENOMEM;
+
+ gpiod_set_consumer_name(pcie->id_reset_gpio[i],
+ pcie->reset_names[i]);
}
}
return 0;
-
-put_node:
- of_node_put(child);
- of_node_put(parent);
- return ret;
}
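
The scoped iterators above drop the child node reference automatically on every exit path, including early returns, which is why the put_node unwinding could be deleted; in sketch form (handle() is a hypothetical callback):

	for_each_available_child_of_node_scoped(node, child) {
		ret = handle(child);
		if (ret)
			return ret;	/* no of_node_put(child) needed */
	}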
static long kirin_pcie_get_resource(struct kirin_pcie *kirin_pcie,
struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct device_node *child, *node = dev->of_node;
+ struct device_node *node = dev->of_node;
void __iomem *apb_base;
int ret;
@@ -467,31 +463,24 @@ static long kirin_pcie_get_resource(struct kirin_pcie *kirin_pcie,
return PTR_ERR(kirin_pcie->apb);
/* pcie internal PERST# gpio */
- kirin_pcie->gpio_id_dwc_perst = of_get_named_gpio(dev->of_node,
- "reset-gpios", 0);
- if (kirin_pcie->gpio_id_dwc_perst == -EPROBE_DEFER) {
- return -EPROBE_DEFER;
- } else if (!gpio_is_valid(kirin_pcie->gpio_id_dwc_perst)) {
- dev_err(dev, "unable to get a valid gpio pin\n");
- return -ENODEV;
- }
+ kirin_pcie->id_dwc_perst_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(kirin_pcie->id_dwc_perst_gpio))
+ return dev_err_probe(dev, PTR_ERR(kirin_pcie->id_dwc_perst_gpio),
+ "unable to get a valid gpio pin\n");
+ gpiod_set_consumer_name(kirin_pcie->id_dwc_perst_gpio, "pcie_perst_bridge");
ret = kirin_pcie_get_gpio_enable(kirin_pcie, pdev);
if (ret)
return ret;
/* Parse OF children */
- for_each_available_child_of_node(node, child) {
+ for_each_available_child_of_node_scoped(node, child) {
ret = kirin_pcie_parse_port(kirin_pcie, pdev, child);
if (ret)
- goto put_node;
+ return ret;
}
return 0;
-
-put_node:
- of_node_put(child);
- return ret;
}
static void kirin_pcie_sideband_dbi_w_mode(struct kirin_pcie *kirin_pcie,
@@ -557,7 +546,7 @@ static int kirin_pcie_add_bus(struct pci_bus *bus)
/* Send PERST# to each slot */
for (i = 0; i < kirin_pcie->num_slots; i++) {
- ret = gpio_direction_output(kirin_pcie->gpio_id_reset[i], 1);
+ ret = gpiod_direction_output_raw(kirin_pcie->id_reset_gpio[i], 1);
if (ret) {
dev_err(pci->dev, "PERST# %s error: %d\n",
kirin_pcie->reset_names[i], ret);
@@ -597,16 +586,13 @@ static void kirin_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base,
kirin_pcie_sideband_dbi_w_mode(kirin_pcie, false);
}
-static int kirin_pcie_link_up(struct dw_pcie *pci)
+static bool kirin_pcie_link_up(struct dw_pcie *pci)
{
struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
u32 val;
regmap_read(kirin_pcie->apb, PCIE_APB_PHY_STATUS0, &val);
- if ((val & PCIE_LINKUP_ENABLE) == PCIE_LINKUP_ENABLE)
- return 1;
-
- return 0;
+ return (val & PCIE_LINKUP_ENABLE) == PCIE_LINKUP_ENABLE;
}
static int kirin_pcie_start_link(struct dw_pcie *pci)
@@ -627,44 +613,6 @@ static int kirin_pcie_host_init(struct dw_pcie_rp *pp)
return 0;
}
-static int kirin_pcie_gpio_request(struct kirin_pcie *kirin_pcie,
- struct device *dev)
-{
- int ret, i;
-
- for (i = 0; i < kirin_pcie->num_slots; i++) {
- if (!gpio_is_valid(kirin_pcie->gpio_id_reset[i])) {
- dev_err(dev, "unable to get a valid %s gpio\n",
- kirin_pcie->reset_names[i]);
- return -ENODEV;
- }
-
- ret = devm_gpio_request(dev, kirin_pcie->gpio_id_reset[i],
- kirin_pcie->reset_names[i]);
- if (ret)
- return ret;
- }
-
- for (i = 0; i < kirin_pcie->n_gpio_clkreq; i++) {
- if (!gpio_is_valid(kirin_pcie->gpio_id_clkreq[i])) {
- dev_err(dev, "unable to get a valid %s gpio\n",
- kirin_pcie->clkreq_names[i]);
- return -ENODEV;
- }
-
- ret = devm_gpio_request(dev, kirin_pcie->gpio_id_clkreq[i],
- kirin_pcie->clkreq_names[i]);
- if (ret)
- return ret;
-
- ret = gpio_direction_output(kirin_pcie->gpio_id_clkreq[i], 0);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
static const struct dw_pcie_ops kirin_dw_pcie_ops = {
.read_dbi = kirin_pcie_read_dbi,
.write_dbi = kirin_pcie_write_dbi,
@@ -673,7 +621,7 @@ static const struct dw_pcie_ops kirin_dw_pcie_ops = {
};
static const struct dw_pcie_host_ops kirin_pcie_host_ops = {
- .host_init = kirin_pcie_host_init,
+ .init = kirin_pcie_host_init,
};
static int kirin_pcie_power_off(struct kirin_pcie *kirin_pcie)
@@ -684,7 +632,7 @@ static int kirin_pcie_power_off(struct kirin_pcie *kirin_pcie)
return hi3660_pcie_phy_power_off(kirin_pcie);
for (i = 0; i < kirin_pcie->n_gpio_clkreq; i++)
- gpio_direction_output(kirin_pcie->gpio_id_clkreq[i], 1);
+ gpiod_direction_output_raw(kirin_pcie->id_clkreq_gpio[i], 1);
phy_power_off(kirin_pcie->phy);
phy_exit(kirin_pcie->phy);
@@ -711,10 +659,6 @@ static int kirin_pcie_power_on(struct platform_device *pdev,
if (IS_ERR(kirin_pcie->phy))
return PTR_ERR(kirin_pcie->phy);
- ret = kirin_pcie_gpio_request(kirin_pcie, dev);
- if (ret)
- return ret;
-
ret = phy_init(kirin_pcie->phy);
if (ret)
goto err;
@@ -727,11 +671,9 @@ static int kirin_pcie_power_on(struct platform_device *pdev,
/* perst assert Endpoint */
usleep_range(REF_2_PERST_MIN, REF_2_PERST_MAX);
- if (!gpio_request(kirin_pcie->gpio_id_dwc_perst, "pcie_perst_bridge")) {
- ret = gpio_direction_output(kirin_pcie->gpio_id_dwc_perst, 1);
- if (ret)
- goto err;
- }
+ ret = gpiod_direction_output_raw(kirin_pcie->id_dwc_perst_gpio, 1);
+ if (ret)
+ goto err;
usleep_range(PERST_2_ACCESS_MIN, PERST_2_ACCESS_MAX);
@@ -742,15 +684,13 @@ err:
return ret;
}
-static int __exit kirin_pcie_remove(struct platform_device *pdev)
+static void kirin_pcie_remove(struct platform_device *pdev)
{
struct kirin_pcie *kirin_pcie = platform_get_drvdata(pdev);
dw_pcie_host_deinit(&kirin_pcie->pci->pp);
kirin_pcie_power_off(kirin_pcie);
-
- return 0;
}
struct kirin_pcie_data {
@@ -779,16 +719,9 @@ static int kirin_pcie_probe(struct platform_device *pdev)
struct dw_pcie *pci;
int ret;
- if (!dev->of_node) {
- dev_err(dev, "NULL node\n");
- return -EINVAL;
- }
-
data = of_device_get_match_data(dev);
- if (!data) {
- dev_err(dev, "OF data missing\n");
- return -EINVAL;
- }
+ if (!data)
+ return dev_err_probe(dev, -EINVAL, "OF data missing\n");
kirin_pcie = devm_kzalloc(dev, sizeof(struct kirin_pcie), GFP_KERNEL);
if (!kirin_pcie)
@@ -819,7 +752,7 @@ static int kirin_pcie_probe(struct platform_device *pdev)
static struct platform_driver kirin_pcie_driver = {
.probe = kirin_pcie_probe,
- .remove = __exit_p(kirin_pcie_remove),
+ .remove = kirin_pcie_remove,
.driver = {
.name = "kirin-pcie",
.of_match_table = kirin_pcie_match,
diff --git a/drivers/pci/controller/dwc/pcie-nxp-s32g.c b/drivers/pci/controller/dwc/pcie-nxp-s32g.c
new file mode 100644
index 000000000000..47745749f75c
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-nxp-s32g.c
@@ -0,0 +1,406 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for NXP S32G SoCs
+ *
+ * Copyright 2019-2025 NXP
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/sizes.h>
+#include <linux/types.h>
+
+#include "pcie-designware.h"
+
+/* PCIe controller Sub-System */
+
+/* PCIe controller 0 General Control 1 */
+#define PCIE_S32G_PE0_GEN_CTRL_1 0x50
+#define DEVICE_TYPE_MASK GENMASK(3, 0)
+#define SRIS_MODE BIT(8)
+
+/* PCIe controller 0 General Control 3 */
+#define PCIE_S32G_PE0_GEN_CTRL_3 0x58
+#define LTSSM_EN BIT(0)
+
+/* PCIe Controller 0 Interrupt Status */
+#define PCIE_S32G_PE0_INT_STS 0xE8
+#define HP_INT_STS BIT(6)
+
+/* Boundary between peripheral space and physical memory space */
+#define S32G_MEMORY_BOUNDARY_ADDR 0x80000000
+
+struct s32g_pcie_port {
+ struct list_head list;
+ struct phy *phy;
+};
+
+struct s32g_pcie {
+ struct dw_pcie pci;
+ void __iomem *ctrl_base;
+ struct list_head ports;
+};
+
+#define to_s32g_from_dw_pcie(x) \
+ container_of(x, struct s32g_pcie, pci)
+
+static void s32g_pcie_writel_ctrl(struct s32g_pcie *s32g_pp, u32 reg, u32 val)
+{
+ writel(val, s32g_pp->ctrl_base + reg);
+}
+
+static u32 s32g_pcie_readl_ctrl(struct s32g_pcie *s32g_pp, u32 reg)
+{
+ return readl(s32g_pp->ctrl_base + reg);
+}
+
+static void s32g_pcie_enable_ltssm(struct s32g_pcie *s32g_pp)
+{
+ u32 reg;
+
+ reg = s32g_pcie_readl_ctrl(s32g_pp, PCIE_S32G_PE0_GEN_CTRL_3);
+ reg |= LTSSM_EN;
+ s32g_pcie_writel_ctrl(s32g_pp, PCIE_S32G_PE0_GEN_CTRL_3, reg);
+}
+
+static void s32g_pcie_disable_ltssm(struct s32g_pcie *s32g_pp)
+{
+ u32 reg;
+
+ reg = s32g_pcie_readl_ctrl(s32g_pp, PCIE_S32G_PE0_GEN_CTRL_3);
+ reg &= ~LTSSM_EN;
+ s32g_pcie_writel_ctrl(s32g_pp, PCIE_S32G_PE0_GEN_CTRL_3, reg);
+}
+
+static int s32g_pcie_start_link(struct dw_pcie *pci)
+{
+ struct s32g_pcie *s32g_pp = to_s32g_from_dw_pcie(pci);
+
+ s32g_pcie_enable_ltssm(s32g_pp);
+
+ return 0;
+}
+
+static void s32g_pcie_stop_link(struct dw_pcie *pci)
+{
+ struct s32g_pcie *s32g_pp = to_s32g_from_dw_pcie(pci);
+
+ s32g_pcie_disable_ltssm(s32g_pp);
+}
+
+static struct dw_pcie_ops s32g_pcie_ops = {
+ .start_link = s32g_pcie_start_link,
+ .stop_link = s32g_pcie_stop_link,
+};
+
+/* Configure the AMBA AXI Coherency Extensions (ACE) interface */
+static void s32g_pcie_reset_mstr_ace(struct dw_pcie *pci)
+{
+ u32 ddr_base_low = lower_32_bits(S32G_MEMORY_BOUNDARY_ADDR);
+ u32 ddr_base_high = upper_32_bits(S32G_MEMORY_BOUNDARY_ADDR);
+
+ dw_pcie_dbi_ro_wr_en(pci);
+ dw_pcie_writel_dbi(pci, COHERENCY_CONTROL_3_OFF, 0x0);
+
+ /*
+ * Ncore is a cache-coherent interconnect module that enables the
+ * integration of heterogeneous coherent and non-coherent agents in
+ * the chip. Ncore transactions targeting peripherals must be
+ * non-coherent, otherwise Ncore may drop them.
+ *
+ * One example where this is needed are PCIe MSIs, which use NoSnoop=0
+ * and might end up routed to Ncore. PCIe coherent traffic (e.g. MSIs)
+ * that targets peripheral space will be dropped by Ncore because
+ * peripherals on S32G are not coherent as slaves. We add a hard
+ * boundary in the PCIe controller coherency control registers to
+ * separate physical memory space from peripheral space.
+ *
+ * Define the start of DDR as seen by Linux as this boundary between
+ * "memory" and "peripherals", with peripherals being below.
+ */
+ dw_pcie_writel_dbi(pci, COHERENCY_CONTROL_1_OFF,
+ (ddr_base_low & CFG_MEMTYPE_BOUNDARY_LOW_ADDR_MASK));
+ dw_pcie_writel_dbi(pci, COHERENCY_CONTROL_2_OFF, ddr_base_high);
+ dw_pcie_dbi_ro_wr_dis(pci);
+}
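
With S32G_MEMORY_BOUNDARY_ADDR fixed at 0x80000000, the two boundary writes above reduce to the following (worked values, shown for illustration only):

	/* ddr_base_low == 0x80000000, ddr_base_high == 0x0 */
	dw_pcie_writel_dbi(pci, COHERENCY_CONTROL_1_OFF,
			   0x80000000 & CFG_MEMTYPE_BOUNDARY_LOW_ADDR_MASK);
	dw_pcie_writel_dbi(pci, COHERENCY_CONTROL_2_OFF, 0x0);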
+
+static int s32g_init_pcie_controller(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct s32g_pcie *s32g_pp = to_s32g_from_dw_pcie(pci);
+ u32 val;
+
+ /* Set RP mode */
+ val = s32g_pcie_readl_ctrl(s32g_pp, PCIE_S32G_PE0_GEN_CTRL_1);
+ val &= ~DEVICE_TYPE_MASK;
+ val |= FIELD_PREP(DEVICE_TYPE_MASK, PCI_EXP_TYPE_ROOT_PORT);
+
+ /* Use default CRNS */
+ val &= ~SRIS_MODE;
+
+ s32g_pcie_writel_ctrl(s32g_pp, PCIE_S32G_PE0_GEN_CTRL_1, val);
+
+ /*
+ * Make sure we use the coherency defaults (just in case the settings
+ * have been changed from their reset values)
+ */
+ s32g_pcie_reset_mstr_ace(pci);
+
+ dw_pcie_dbi_ro_wr_en(pci);
+
+ val = dw_pcie_readl_dbi(pci, PCIE_PORT_FORCE);
+ val |= PORT_FORCE_DO_DESKEW_FOR_SRIS;
+ dw_pcie_writel_dbi(pci, PCIE_PORT_FORCE, val);
+
+ val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
+ val |= GEN3_RELATED_OFF_EQ_PHASE_2_3;
+ dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
+
+ dw_pcie_dbi_ro_wr_dis(pci);
+
+ return 0;
+}
+
+static const struct dw_pcie_host_ops s32g_pcie_host_ops = {
+ .init = s32g_init_pcie_controller,
+};
+
+static int s32g_init_pcie_phy(struct s32g_pcie *s32g_pp)
+{
+ struct dw_pcie *pci = &s32g_pp->pci;
+ struct device *dev = pci->dev;
+ struct s32g_pcie_port *port, *tmp;
+ int ret;
+
+ list_for_each_entry(port, &s32g_pp->ports, list) {
+ ret = phy_init(port->phy);
+ if (ret) {
+ dev_err(dev, "Failed to init serdes PHY\n");
+ goto err_phy_revert;
+ }
+
+ ret = phy_set_mode_ext(port->phy, PHY_MODE_PCIE, 0);
+ if (ret) {
+ dev_err(dev, "Failed to set mode on serdes PHY\n");
+ goto err_phy_exit;
+ }
+
+ ret = phy_power_on(port->phy);
+ if (ret) {
+ dev_err(dev, "Failed to power on serdes PHY\n");
+ goto err_phy_exit;
+ }
+ }
+
+ return 0;
+
+err_phy_exit:
+ phy_exit(port->phy);
+
+err_phy_revert:
+ list_for_each_entry_continue_reverse(port, &s32g_pp->ports, list) {
+ phy_power_off(port->phy);
+ phy_exit(port->phy);
+ }
+
+ list_for_each_entry_safe(port, tmp, &s32g_pp->ports, list)
+ list_del(&port->list);
+
+ return ret;
+}
+
+static void s32g_deinit_pcie_phy(struct s32g_pcie *s32g_pp)
+{
+ struct s32g_pcie_port *port, *tmp;
+
+ list_for_each_entry_safe(port, tmp, &s32g_pp->ports, list) {
+ phy_power_off(port->phy);
+ phy_exit(port->phy);
+ list_del(&port->list);
+ }
+}
+
+static int s32g_pcie_init(struct device *dev, struct s32g_pcie *s32g_pp)
+{
+ s32g_pcie_disable_ltssm(s32g_pp);
+
+ return s32g_init_pcie_phy(s32g_pp);
+}
+
+static void s32g_pcie_deinit(struct s32g_pcie *s32g_pp)
+{
+ s32g_pcie_disable_ltssm(s32g_pp);
+
+ s32g_deinit_pcie_phy(s32g_pp);
+}
+
+static int s32g_pcie_parse_port(struct s32g_pcie *s32g_pp, struct device_node *node)
+{
+ struct device *dev = s32g_pp->pci.dev;
+ struct s32g_pcie_port *port;
+ int num_lanes;
+
+ port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ port->phy = devm_of_phy_get(dev, node, NULL);
+ if (IS_ERR(port->phy))
+ return dev_err_probe(dev, PTR_ERR(port->phy),
+ "Failed to get serdes PHY\n");
+
+ INIT_LIST_HEAD(&port->list);
+ list_add_tail(&port->list, &s32g_pp->ports);
+
+ /*
+	 * The DWC core initialization code cannot yet parse the num-lanes
+	 * property in the Root Port node. The S32G only supports one Root
+	 * Port for now, so its driver can parse the node and set the num_lanes
+	 * field of struct dw_pcie before calling dw_pcie_host_init().
+ */
+ if (!of_property_read_u32(node, "num-lanes", &num_lanes))
+ s32g_pp->pci.num_lanes = num_lanes;
+
+ return 0;
+}
+
+static int s32g_pcie_parse_ports(struct device *dev, struct s32g_pcie *s32g_pp)
+{
+ struct s32g_pcie_port *port, *tmp;
+ int ret = -ENOENT;
+
+ for_each_available_child_of_node_scoped(dev->of_node, of_port) {
+ if (!of_node_is_type(of_port, "pci"))
+ continue;
+
+ ret = s32g_pcie_parse_port(s32g_pp, of_port);
+ if (ret)
+ goto err_port;
+	}
+
+	return 0;
+
+err_port:
+ list_for_each_entry_safe(port, tmp, &s32g_pp->ports, list)
+ list_del(&port->list);
+
+ return ret;
+}
+
+static int s32g_pcie_get_resources(struct platform_device *pdev,
+ struct s32g_pcie *s32g_pp)
+{
+ struct dw_pcie *pci = &s32g_pp->pci;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ pci->dev = dev;
+ pci->ops = &s32g_pcie_ops;
+
+ s32g_pp->ctrl_base = devm_platform_ioremap_resource_byname(pdev, "ctrl");
+ if (IS_ERR(s32g_pp->ctrl_base))
+ return PTR_ERR(s32g_pp->ctrl_base);
+
+ INIT_LIST_HEAD(&s32g_pp->ports);
+
+ ret = s32g_pcie_parse_ports(dev, s32g_pp);
+ if (ret)
+		return dev_err_probe(dev, ret,
+				     "Failed to parse Root Port\n");
+
+ platform_set_drvdata(pdev, s32g_pp);
+
+ return 0;
+}
+
+static int s32g_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct s32g_pcie *s32g_pp;
+ struct dw_pcie_rp *pp;
+ int ret;
+
+ s32g_pp = devm_kzalloc(dev, sizeof(*s32g_pp), GFP_KERNEL);
+ if (!s32g_pp)
+ return -ENOMEM;
+
+ ret = s32g_pcie_get_resources(pdev, s32g_pp);
+ if (ret)
+ return ret;
+
+ pm_runtime_no_callbacks(dev);
+ devm_pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0)
+ goto err_pm_runtime_put;
+
+ ret = s32g_pcie_init(dev, s32g_pp);
+ if (ret)
+ goto err_pm_runtime_put;
+
+ pp = &s32g_pp->pci.pp;
+ pp->ops = &s32g_pcie_host_ops;
+ pp->use_atu_msg = true;
+
+ ret = dw_pcie_host_init(pp);
+ if (ret)
+ goto err_pcie_deinit;
+
+ return 0;
+
+err_pcie_deinit:
+ s32g_pcie_deinit(s32g_pp);
+err_pm_runtime_put:
+ pm_runtime_put(dev);
+
+ return ret;
+}
+
+static int s32g_pcie_suspend_noirq(struct device *dev)
+{
+ struct s32g_pcie *s32g_pp = dev_get_drvdata(dev);
+ struct dw_pcie *pci = &s32g_pp->pci;
+
+ return dw_pcie_suspend_noirq(pci);
+}
+
+static int s32g_pcie_resume_noirq(struct device *dev)
+{
+ struct s32g_pcie *s32g_pp = dev_get_drvdata(dev);
+ struct dw_pcie *pci = &s32g_pp->pci;
+
+ return dw_pcie_resume_noirq(pci);
+}
+
+static const struct dev_pm_ops s32g_pcie_pm_ops = {
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(s32g_pcie_suspend_noirq,
+ s32g_pcie_resume_noirq)
+};
+
+static const struct of_device_id s32g_pcie_of_match[] = {
+ { .compatible = "nxp,s32g2-pcie" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, s32g_pcie_of_match);
+
+static struct platform_driver s32g_pcie_driver = {
+ .driver = {
+ .name = "s32g-pcie",
+ .of_match_table = s32g_pcie_of_match,
+ .suppress_bind_attrs = true,
+ .pm = pm_sleep_ptr(&s32g_pcie_pm_ops),
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+ .probe = s32g_pcie_probe,
+};
+
+builtin_platform_driver(s32g_pcie_driver);
+
+MODULE_AUTHOR("Ionut Vicovan <Ionut.Vicovan@nxp.com>");
+MODULE_DESCRIPTION("NXP S32G PCIe Host controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pci/controller/dwc/pcie-qcom-common.c b/drivers/pci/controller/dwc/pcie-qcom-common.c
new file mode 100644
index 000000000000..01c5387e53bf
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-qcom-common.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/pci.h>
+
+#include "pcie-designware.h"
+#include "pcie-qcom-common.h"
+
+void qcom_pcie_common_set_equalization(struct dw_pcie *pci)
+{
+ struct device *dev = pci->dev;
+ u32 reg;
+ u16 speed;
+
+ /*
+ * GEN3_RELATED_OFF register is repurposed to apply equalization
+ * settings at various data transmission rates through registers namely
+ * GEN3_EQ_*. The RATE_SHADOW_SEL bit field of GEN3_RELATED_OFF
+ * determines the data rate for which these equalization settings are
+ * applied.
+ */
+
+ for (speed = PCIE_SPEED_8_0GT; speed <= pcie_link_speed[pci->max_link_speed]; speed++) {
+ if (speed > PCIE_SPEED_32_0GT) {
+ dev_warn(dev, "Skipped equalization settings for unsupported data rate\n");
+ break;
+ }
+
+ reg = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
+ reg &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
+ reg &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
+ reg |= FIELD_PREP(GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK,
+ speed - PCIE_SPEED_8_0GT);
+ dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, reg);
+
+ reg = dw_pcie_readl_dbi(pci, GEN3_EQ_FB_MODE_DIR_CHANGE_OFF);
+ reg &= ~(GEN3_EQ_FMDC_T_MIN_PHASE23 |
+ GEN3_EQ_FMDC_N_EVALS |
+ GEN3_EQ_FMDC_MAX_PRE_CURSOR_DELTA |
+ GEN3_EQ_FMDC_MAX_POST_CURSOR_DELTA);
+ reg |= FIELD_PREP(GEN3_EQ_FMDC_T_MIN_PHASE23, 0x1) |
+ FIELD_PREP(GEN3_EQ_FMDC_N_EVALS, 0xd) |
+ FIELD_PREP(GEN3_EQ_FMDC_MAX_PRE_CURSOR_DELTA, 0x5) |
+ FIELD_PREP(GEN3_EQ_FMDC_MAX_POST_CURSOR_DELTA, 0x5);
+ dw_pcie_writel_dbi(pci, GEN3_EQ_FB_MODE_DIR_CHANGE_OFF, reg);
+
+ reg = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
+ reg &= ~(GEN3_EQ_CONTROL_OFF_FB_MODE |
+ GEN3_EQ_CONTROL_OFF_PHASE23_EXIT_MODE |
+ GEN3_EQ_CONTROL_OFF_FOM_INC_INITIAL_EVAL |
+ GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC);
+ dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, reg);
+ }
+}
+EXPORT_SYMBOL_GPL(qcom_pcie_common_set_equalization);
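
A worked pass through the loop above, using the kernel's pcie_link_speed[] encoding table:

	/*
	 * Illustrative: with pci->max_link_speed == 4,
	 * pcie_link_speed[4] == PCIE_SPEED_16_0GT, so the body runs for
	 * PCIE_SPEED_8_0GT and PCIE_SPEED_16_0GT and programs
	 * RATE_SHADOW_SEL = speed - PCIE_SPEED_8_0GT, i.e. 0 for the
	 * 8.0 GT/s settings and 1 for the 16.0 GT/s settings.
	 */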
+
+void qcom_pcie_common_set_16gt_lane_margining(struct dw_pcie *pci)
+{
+ u32 reg;
+
+ reg = dw_pcie_readl_dbi(pci, GEN4_LANE_MARGINING_1_OFF);
+ reg &= ~(MARGINING_MAX_VOLTAGE_OFFSET |
+ MARGINING_NUM_VOLTAGE_STEPS |
+ MARGINING_MAX_TIMING_OFFSET |
+ MARGINING_NUM_TIMING_STEPS);
+ reg |= FIELD_PREP(MARGINING_MAX_VOLTAGE_OFFSET, 0x24) |
+ FIELD_PREP(MARGINING_NUM_VOLTAGE_STEPS, 0x78) |
+ FIELD_PREP(MARGINING_MAX_TIMING_OFFSET, 0x32) |
+ FIELD_PREP(MARGINING_NUM_TIMING_STEPS, 0x10);
+ dw_pcie_writel_dbi(pci, GEN4_LANE_MARGINING_1_OFF, reg);
+
+ reg = dw_pcie_readl_dbi(pci, GEN4_LANE_MARGINING_2_OFF);
+ reg |= MARGINING_IND_ERROR_SAMPLER |
+ MARGINING_SAMPLE_REPORTING_METHOD |
+ MARGINING_IND_LEFT_RIGHT_TIMING |
+ MARGINING_VOLTAGE_SUPPORTED;
+ reg &= ~(MARGINING_IND_UP_DOWN_VOLTAGE |
+ MARGINING_MAXLANES |
+ MARGINING_SAMPLE_RATE_TIMING |
+ MARGINING_SAMPLE_RATE_VOLTAGE);
+ reg |= FIELD_PREP(MARGINING_MAXLANES, pci->num_lanes) |
+ FIELD_PREP(MARGINING_SAMPLE_RATE_TIMING, 0x3f) |
+ FIELD_PREP(MARGINING_SAMPLE_RATE_VOLTAGE, 0x3f);
+ dw_pcie_writel_dbi(pci, GEN4_LANE_MARGINING_2_OFF, reg);
+}
+EXPORT_SYMBOL_GPL(qcom_pcie_common_set_16gt_lane_margining);
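
Both helpers touch only DBI registers, so the RC (pcie-qcom.c) and EP (pcie-qcom-ep.c) drivers can share them; the expected call site, mirroring the endpoint-driver hunk later in this diff, is:

	qcom_pcie_common_set_equalization(pci);

	/* Lane margining only applies when the link can train to 16.0 GT/s */
	if (pcie_link_speed[pci->max_link_speed] == PCIE_SPEED_16_0GT)
		qcom_pcie_common_set_16gt_lane_margining(pci);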
diff --git a/drivers/pci/controller/dwc/pcie-qcom-common.h b/drivers/pci/controller/dwc/pcie-qcom-common.h
new file mode 100644
index 000000000000..7f5ca2fd9a72
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-qcom-common.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _PCIE_QCOM_COMMON_H
+#define _PCIE_QCOM_COMMON_H
+
+struct dw_pcie;
+
+void qcom_pcie_common_set_equalization(struct dw_pcie *pci);
+void qcom_pcie_common_set_16gt_lane_margining(struct dw_pcie *pci);
+
+#endif
diff --git a/drivers/pci/controller/dwc/pcie-qcom-ep.c b/drivers/pci/controller/dwc/pcie-qcom-ep.c
index 19b32839ea26..f1bc0ac81a92 100644
--- a/drivers/pci/controller/dwc/pcie-qcom-ep.c
+++ b/drivers/pci/controller/dwc/pcie-qcom-ep.c
@@ -13,6 +13,7 @@
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
+#include <linux/interconnect.h>
#include <linux/mfd/syscon.h>
#include <linux/phy/pcie.h>
#include <linux/phy/phy.h>
@@ -22,7 +23,9 @@
#include <linux/reset.h>
#include <linux/module.h>
+#include "../../pci.h"
#include "pcie-designware.h"
+#include "pcie-qcom-common.h"
/* PARF registers */
#define PARF_SYS_CTRL 0x00
@@ -45,6 +48,7 @@
#define PARF_DBI_BASE_ADDR_HI 0x354
#define PARF_SLV_ADDR_SPACE_SIZE 0x358
#define PARF_SLV_ADDR_SPACE_SIZE_HI 0x35c
+#define PARF_NO_SNOOP_OVERRIDE 0x3d4
#define PARF_ATU_BASE_ADDR 0x634
#define PARF_ATU_BASE_ADDR_HI 0x638
#define PARF_SRIS_MODE 0x644
@@ -55,6 +59,8 @@
#define PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2 0xc88
#define PARF_DEVICE_TYPE 0x1000
#define PARF_BDF_TO_SID_CFG 0x2c00
+#define PARF_INT_ALL_5_MASK 0x2dcc
+#define PARF_INT_ALL_3_MASK 0x2e18
/* PARF_INT_ALL_{STATUS/CLEAR/MASK} register fields */
#define PARF_INT_ALL_LINK_DOWN BIT(1)
@@ -74,6 +80,7 @@
#define PARF_INT_ALL_PLS_ERR BIT(15)
#define PARF_INT_ALL_PME_LEGACY BIT(16)
#define PARF_INT_ALL_PLS_PME BIT(17)
+#define PARF_INT_ALL_EDMA BIT(22)
/* PARF_BDF_TO_SID_CFG register fields */
#define PARF_BDF_TO_SID_BYPASS BIT(0)
@@ -83,6 +90,10 @@
#define PARF_DEBUG_INT_CFG_BUS_MASTER_EN BIT(2)
#define PARF_DEBUG_INT_RADM_PM_TURNOFF BIT(3)
+/* PARF_NO_SNOOP_OVERRIDE register fields */
+#define WR_NO_SNOOP_OVERRIDE_EN BIT(1)
+#define RD_NO_SNOOP_OVERRIDE_EN BIT(3)
+
/* PARF_DEVICE_TYPE register fields */
#define PARF_DEVICE_TYPE_EP 0x0
@@ -119,8 +130,15 @@
/* PARF_CFG_BITS register fields */
#define PARF_CFG_BITS_REQ_EXIT_L1SS_MSI_LTR_EN BIT(1)
+/* PARF_INT_ALL_5_MASK fields */
+#define PARF_INT_ALL_5_MHI_RAM_DATA_PARITY_ERR BIT(0)
+
+/* PARF_INT_ALL_3_MASK fields */
+#define PARF_INT_ALL_3_PTM_UPDATING BIT(4)
+
/* ELBI registers */
#define ELBI_SYS_STTS 0x08
+#define ELBI_CS2_ENABLE 0xa4
/* DBI registers */
#define DBI_CON_STATUS 0x44
@@ -133,6 +151,9 @@
#define CORE_RESET_TIME_US_MAX 1005
#define WAKE_DELAY_US 2000 /* 2 ms */
+#define QCOM_PCIE_LINK_SPEED_TO_BW(speed) \
+ Mbps_to_icc(PCIE_SPEED2MBS_ENC(pcie_link_speed[speed]))
+
#define to_pcie_ep(x) dev_get_drvdata((x)->dev)
enum qcom_pcie_ep_link_status {
@@ -143,10 +164,21 @@ enum qcom_pcie_ep_link_status {
};
/**
+ * struct qcom_pcie_ep_cfg - Per SoC config struct
+ * @hdma_support: HDMA support on this SoC
+ * @override_no_snoop: Override NO_SNOOP attribute in TLP to enable cache snooping
+ * @disable_mhi_ram_parity_check: Disable MHI RAM data parity error check
+ */
+struct qcom_pcie_ep_cfg {
+ bool hdma_support;
+ bool override_no_snoop;
+ bool disable_mhi_ram_parity_check;
+};
+
+/**
* struct qcom_pcie_ep - Qualcomm PCIe Endpoint Controller
* @pci: Designware PCIe controller struct
* @parf: Qualcomm PCIe specific PARF register base
- * @elbi: Designware PCIe specific ELBI register base
* @mmio: MMIO register base
* @perst_map: PERST regmap
* @mmio_res: MMIO region resource
@@ -155,10 +187,12 @@ enum qcom_pcie_ep_link_status {
* @wake: WAKE# GPIO
* @phy: PHY controller block
* @debugfs: PCIe Endpoint Debugfs directory
+ * @icc_mem: Handle to an interconnect path between PCIe and MEM
* @clks: PCIe clocks
* @num_clks: PCIe clocks count
* @perst_en: Flag for PERST enable
* @perst_sep_en: Flag for PERST separation enable
+ * @cfg: PCIe EP config struct
* @link_status: PCIe Link status
* @global_irq: Qualcomm PCIe specific Global IRQ
* @perst_irq: PERST# IRQ
@@ -167,7 +201,6 @@ struct qcom_pcie_ep {
struct dw_pcie pci;
void __iomem *parf;
- void __iomem *elbi;
void __iomem *mmio;
struct regmap *perst_map;
struct resource *mmio_res;
@@ -178,12 +211,15 @@ struct qcom_pcie_ep {
struct phy *phy;
struct dentry *debugfs;
+ struct icc_path *icc_mem;
+
struct clk_bulk_data *clks;
int num_clks;
u32 perst_en;
u32 perst_sep_en;
+ const struct qcom_pcie_ep_cfg *cfg;
enum qcom_pcie_ep_link_status link_status;
int global_irq;
int perst_irq;
@@ -227,12 +263,11 @@ static void qcom_pcie_ep_configure_tcsr(struct qcom_pcie_ep *pcie_ep)
}
}
-static int qcom_pcie_dw_link_up(struct dw_pcie *pci)
+static bool qcom_pcie_dw_link_up(struct dw_pcie *pci)
{
- struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);
u32 reg;
- reg = readl_relaxed(pcie_ep->elbi + ELBI_SYS_STTS);
+ reg = readl_relaxed(pci->elbi_base + ELBI_SYS_STTS);
return reg & XMLH_LINK_UP;
}
@@ -253,8 +288,45 @@ static void qcom_pcie_dw_stop_link(struct dw_pcie *pci)
disable_irq(pcie_ep->perst_irq);
}
+static void qcom_pcie_dw_write_dbi2(struct dw_pcie *pci, void __iomem *base,
+ u32 reg, size_t size, u32 val)
+{
+ int ret;
+
+ writel(1, pci->elbi_base + ELBI_CS2_ENABLE);
+
+ ret = dw_pcie_write(pci->dbi_base2 + reg, size, val);
+ if (ret)
+ dev_err(pci->dev, "Failed to write DBI2 register (0x%x): %d\n", reg, ret);
+
+ writel(0, pci->elbi_base + ELBI_CS2_ENABLE);
+}
+
+static void qcom_pcie_ep_icc_update(struct qcom_pcie_ep *pcie_ep)
+{
+ struct dw_pcie *pci = &pcie_ep->pci;
+ u32 offset, status;
+ int speed, width;
+ int ret;
+
+ if (!pcie_ep->icc_mem)
+ return;
+
+ offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ status = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA);
+
+ speed = FIELD_GET(PCI_EXP_LNKSTA_CLS, status);
+ width = FIELD_GET(PCI_EXP_LNKSTA_NLW, status);
+
+ ret = icc_set_bw(pcie_ep->icc_mem, 0, width * QCOM_PCIE_LINK_SPEED_TO_BW(speed));
+ if (ret)
+ dev_err(pci->dev, "failed to set interconnect bandwidth: %d\n",
+ ret);
+}
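
For reference, a worked example of the vote computed above (the 7877 Mb/s figure is 8 GT/s after 128b/130b encoding overhead, per the kernel's PCIE_SPEED2MBS_ENC() table):

	/*
	 * Illustrative: a Gen 3 x2 link reports PCI_EXP_LNKSTA_CLS == 3 and
	 * PCI_EXP_LNKSTA_NLW == 2, so the request becomes
	 *   2 * QCOM_PCIE_LINK_SPEED_TO_BW(3)
	 *   == 2 * Mbps_to_icc(PCIE_SPEED2MBS_ENC(PCIE_SPEED_8_0GT))
	 *   == 2 * Mbps_to_icc(7877)
	 */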
+
static int qcom_pcie_enable_resources(struct qcom_pcie_ep *pcie_ep)
{
+ struct dw_pcie *pci = &pcie_ep->pci;
int ret;
ret = clk_bulk_prepare_enable(pcie_ep->num_clks, pcie_ep->clks);
@@ -277,8 +349,24 @@ static int qcom_pcie_enable_resources(struct qcom_pcie_ep *pcie_ep)
if (ret)
goto err_phy_exit;
+ /*
+ * Some Qualcomm platforms require interconnect bandwidth constraints
+ * to be set before enabling interconnect clocks.
+ *
+ * Set an initial peak bandwidth corresponding to single-lane Gen 1
+ * for the pcie-mem path.
+ */
+ ret = icc_set_bw(pcie_ep->icc_mem, 0, QCOM_PCIE_LINK_SPEED_TO_BW(1));
+ if (ret) {
+ dev_err(pci->dev, "failed to set interconnect bandwidth: %d\n",
+ ret);
+ goto err_phy_off;
+ }
+
return 0;
+err_phy_off:
+ phy_power_off(pcie_ep->phy);
err_phy_exit:
phy_exit(pcie_ep->phy);
err_disable_clk:
@@ -289,6 +377,7 @@ err_disable_clk:
static void qcom_pcie_disable_resources(struct qcom_pcie_ep *pcie_ep)
{
+ icc_set_bw(pcie_ep->icc_mem, 0, 0);
phy_power_off(pcie_ep->phy);
phy_exit(pcie_ep->phy);
clk_bulk_disable_unprepare(pcie_ep->num_clks, pcie_ep->clks);
@@ -307,6 +396,10 @@ static int qcom_pcie_perst_deassert(struct dw_pcie *pci)
return ret;
}
+ /* Perform cleanup that requires refclk */
+ pci_epc_deinit_notify(pci->ep.epc);
+ dw_pcie_ep_cleanup(&pci->ep);
+
/* Assert WAKE# to RC to indicate device is ready */
gpiod_set_value_cansleep(pcie_ep->wake, 1);
usleep_range(WAKE_DELAY_US, WAKE_DELAY_US + 500);
@@ -395,15 +488,30 @@ static int qcom_pcie_perst_deassert(struct dw_pcie *pci)
writel_relaxed(0, pcie_ep->parf + PARF_INT_ALL_MASK);
val = PARF_INT_ALL_LINK_DOWN | PARF_INT_ALL_BME |
PARF_INT_ALL_PM_TURNOFF | PARF_INT_ALL_DSTATE_CHANGE |
- PARF_INT_ALL_LINK_UP;
+ PARF_INT_ALL_LINK_UP | PARF_INT_ALL_EDMA;
writel_relaxed(val, pcie_ep->parf + PARF_INT_ALL_MASK);
- ret = dw_pcie_ep_init_complete(&pcie_ep->pci.ep);
+ if (pcie_ep->cfg && pcie_ep->cfg->disable_mhi_ram_parity_check) {
+ val = readl_relaxed(pcie_ep->parf + PARF_INT_ALL_5_MASK);
+ val &= ~PARF_INT_ALL_5_MHI_RAM_DATA_PARITY_ERR;
+ writel_relaxed(val, pcie_ep->parf + PARF_INT_ALL_5_MASK);
+ }
+
+ val = readl_relaxed(pcie_ep->parf + PARF_INT_ALL_3_MASK);
+ val &= ~PARF_INT_ALL_3_PTM_UPDATING;
+ writel_relaxed(val, pcie_ep->parf + PARF_INT_ALL_3_MASK);
+
+ ret = dw_pcie_ep_init_registers(&pcie_ep->pci.ep);
if (ret) {
dev_err(dev, "Failed to complete initialization: %d\n", ret);
goto err_disable_resources;
}
+ qcom_pcie_common_set_equalization(pci);
+
+ if (pcie_link_speed[pci->max_link_speed] == PCIE_SPEED_16_0GT)
+ qcom_pcie_common_set_16gt_lane_margining(pci);
+
/*
* The physical address of the MMIO region which is exposed as the BAR
* should be written to MHI BASE registers.
@@ -415,15 +523,19 @@ static int qcom_pcie_perst_deassert(struct dw_pcie *pci)
/* Gate Master AXI clock to MHI bus during L1SS */
val = readl_relaxed(pcie_ep->parf + PARF_MHI_CLOCK_RESET_CTRL);
val &= ~PARF_MSTR_AXI_CLK_EN;
- val = readl_relaxed(pcie_ep->parf + PARF_MHI_CLOCK_RESET_CTRL);
+ writel_relaxed(val, pcie_ep->parf + PARF_MHI_CLOCK_RESET_CTRL);
- dw_pcie_ep_init_notify(&pcie_ep->pci.ep);
+ pci_epc_init_notify(pcie_ep->pci.ep.epc);
/* Enable LTSSM */
val = readl_relaxed(pcie_ep->parf + PARF_LTSSM);
val |= BIT(8);
writel_relaxed(val, pcie_ep->parf + PARF_LTSSM);
+ if (pcie_ep->cfg && pcie_ep->cfg->override_no_snoop)
+ writel_relaxed(WR_NO_SNOOP_OVERRIDE_EN | RD_NO_SNOOP_OVERRIDE_EN,
+ pcie_ep->parf + PARF_NO_SNOOP_OVERRIDE);
+
return 0;
err_disable_resources:
@@ -435,12 +547,6 @@ err_disable_resources:
static void qcom_pcie_perst_assert(struct dw_pcie *pci)
{
struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);
- struct device *dev = pci->dev;
-
- if (pcie_ep->link_status == QCOM_PCIE_EP_LINK_DISABLED) {
- dev_dbg(dev, "Link is already disabled\n");
- return;
- }
qcom_pcie_disable_resources(pcie_ep);
pcie_ep->link_status = QCOM_PCIE_EP_LINK_DISABLED;
@@ -451,6 +557,7 @@ static const struct dw_pcie_ops pci_ops = {
.link_up = qcom_pcie_dw_link_up,
.start_link = qcom_pcie_dw_start_link,
.stop_link = qcom_pcie_dw_stop_link,
+ .write_dbi2 = qcom_pcie_dw_write_dbi2,
};
static int qcom_pcie_ep_get_io_resources(struct platform_device *pdev,
@@ -472,11 +579,6 @@ static int qcom_pcie_ep_get_io_resources(struct platform_device *pdev,
return PTR_ERR(pci->dbi_base);
pci->dbi_base2 = pci->dbi_base;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi");
- pcie_ep->elbi = devm_pci_remap_cfg_resource(dev, res);
- if (IS_ERR(pcie_ep->elbi))
- return PTR_ERR(pcie_ep->elbi);
-
pcie_ep->mmio_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"mmio");
if (!pcie_ep->mmio_res) {
@@ -550,6 +652,10 @@ static int qcom_pcie_ep_get_resources(struct platform_device *pdev,
if (IS_ERR(pcie_ep->phy))
ret = PTR_ERR(pcie_ep->phy);
+ pcie_ep->icc_mem = devm_of_icc_get(dev, "pcie-mem");
+ if (IS_ERR(pcie_ep->icc_mem))
+ ret = PTR_ERR(pcie_ep->icc_mem);
+
return ret;
}
@@ -560,18 +666,19 @@ static irqreturn_t qcom_pcie_ep_global_irq_thread(int irq, void *data)
struct dw_pcie *pci = &pcie_ep->pci;
struct device *dev = pci->dev;
u32 status = readl_relaxed(pcie_ep->parf + PARF_INT_ALL_STATUS);
- u32 mask = readl_relaxed(pcie_ep->parf + PARF_INT_ALL_MASK);
u32 dstate, val;
writel_relaxed(status, pcie_ep->parf + PARF_INT_ALL_CLEAR);
- status &= mask;
if (FIELD_GET(PARF_INT_ALL_LINK_DOWN, status)) {
dev_dbg(dev, "Received Linkdown event\n");
pcie_ep->link_status = QCOM_PCIE_EP_LINK_DOWN;
+ dw_pcie_ep_linkdown(&pci->ep);
} else if (FIELD_GET(PARF_INT_ALL_BME, status)) {
- dev_dbg(dev, "Received BME event. Link is enabled!\n");
+ dev_dbg(dev, "Received Bus Master Enable event\n");
pcie_ep->link_status = QCOM_PCIE_EP_LINK_ENABLED;
+ qcom_pcie_ep_icc_update(pcie_ep);
+ pci_epc_bus_master_enable_notify(pci->ep.epc);
} else if (FIELD_GET(PARF_INT_ALL_PM_TURNOFF, status)) {
dev_dbg(dev, "Received PM Turn-off event! Entering L23\n");
val = readl_relaxed(pcie_ep->parf + PARF_PM_CTRL);
@@ -591,7 +698,8 @@ static irqreturn_t qcom_pcie_ep_global_irq_thread(int irq, void *data)
dw_pcie_ep_linkup(&pci->ep);
pcie_ep->link_status = QCOM_PCIE_EP_LINK_UP;
} else {
- dev_dbg(dev, "Received unknown event: %d\n", status);
+ dev_WARN_ONCE(dev, 1, "Received unknown event. INT_STATUS: 0x%08x\n",
+ status);
}
return IRQ_HANDLED;
@@ -622,8 +730,15 @@ static irqreturn_t qcom_pcie_ep_perst_irq_thread(int irq, void *data)
static int qcom_pcie_ep_enable_irq_resources(struct platform_device *pdev,
struct qcom_pcie_ep *pcie_ep)
{
+ struct device *dev = pcie_ep->pci.dev;
+ char *name;
int ret;
+ name = devm_kasprintf(dev, GFP_KERNEL, "qcom_pcie_ep_global_irq%d",
+ pcie_ep->pci.ep.epc->domain_nr);
+ if (!name)
+ return -ENOMEM;
+
pcie_ep->global_irq = platform_get_irq_byname(pdev, "global");
if (pcie_ep->global_irq < 0)
return pcie_ep->global_irq;
@@ -631,18 +746,23 @@ static int qcom_pcie_ep_enable_irq_resources(struct platform_device *pdev,
ret = devm_request_threaded_irq(&pdev->dev, pcie_ep->global_irq, NULL,
qcom_pcie_ep_global_irq_thread,
IRQF_ONESHOT,
- "global_irq", pcie_ep);
+ name, pcie_ep);
if (ret) {
dev_err(&pdev->dev, "Failed to request Global IRQ\n");
return ret;
}
+ name = devm_kasprintf(dev, GFP_KERNEL, "qcom_pcie_ep_perst_irq%d",
+ pcie_ep->pci.ep.epc->domain_nr);
+ if (!name)
+ return -ENOMEM;
+
pcie_ep->perst_irq = gpiod_to_irq(pcie_ep->reset);
irq_set_status_flags(pcie_ep->perst_irq, IRQ_NOAUTOEN);
ret = devm_request_threaded_irq(&pdev->dev, pcie_ep->perst_irq, NULL,
qcom_pcie_ep_perst_irq_thread,
IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
- "perst_irq", pcie_ep);
+ name, pcie_ep);
if (ret) {
dev_err(&pdev->dev, "Failed to request PERST IRQ\n");
disable_irq(pcie_ep->global_irq);
@@ -653,14 +773,14 @@ static int qcom_pcie_ep_enable_irq_resources(struct platform_device *pdev,
}
static int qcom_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
- enum pci_epc_irq_type type, u16 interrupt_num)
+ unsigned int type, u16 interrupt_num)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
switch (type) {
- case PCI_EPC_IRQ_LEGACY:
- return dw_pcie_ep_raise_legacy_irq(ep, func_no);
- case PCI_EPC_IRQ_MSI:
+ case PCI_IRQ_INTX:
+ return dw_pcie_ep_raise_intx_irq(ep, func_no);
+ case PCI_IRQ_MSI:
return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
default:
dev_err(pci->dev, "Unknown IRQ type\n");
@@ -701,9 +821,12 @@ static void qcom_pcie_ep_init_debugfs(struct qcom_pcie_ep *pcie_ep)
static const struct pci_epc_features qcom_pcie_epc_features = {
.linkup_notifier = true,
- .core_init_notifier = true,
.msi_capable = true,
- .msix_capable = false,
+ .align = SZ_4K,
+ .bar[BAR_0] = { .only_64bit = true, },
+ .bar[BAR_1] = { .type = BAR_RESERVED, },
+ .bar[BAR_2] = { .only_64bit = true, },
+ .bar[BAR_3] = { .type = BAR_RESERVED, },
};
static const struct pci_epc_features *
@@ -722,7 +845,7 @@ static void qcom_pcie_ep_init(struct dw_pcie_ep *ep)
}
static const struct dw_pcie_ep_ops pci_ep_ops = {
- .ep_init = qcom_pcie_ep_init,
+ .init = qcom_pcie_ep_init,
.raise_irq = qcom_pcie_ep_raise_irq,
.get_features = qcom_pcie_epc_get_features,
};
@@ -741,27 +864,29 @@ static int qcom_pcie_ep_probe(struct platform_device *pdev)
pcie_ep->pci.dev = dev;
pcie_ep->pci.ops = &pci_ops;
pcie_ep->pci.ep.ops = &pci_ep_ops;
+
+ pcie_ep->cfg = of_device_get_match_data(dev);
+ if (pcie_ep->cfg && pcie_ep->cfg->hdma_support) {
+ pcie_ep->pci.edma.ll_wr_cnt = 8;
+ pcie_ep->pci.edma.ll_rd_cnt = 8;
+ pcie_ep->pci.edma.mf = EDMA_MF_HDMA_NATIVE;
+ }
+
platform_set_drvdata(pdev, pcie_ep);
ret = qcom_pcie_ep_get_resources(pdev, pcie_ep);
if (ret)
return ret;
- ret = qcom_pcie_enable_resources(pcie_ep);
- if (ret) {
- dev_err(dev, "Failed to enable resources: %d\n", ret);
- return ret;
- }
-
ret = dw_pcie_ep_init(&pcie_ep->pci.ep);
if (ret) {
dev_err(dev, "Failed to initialize endpoint: %d\n", ret);
- goto err_disable_resources;
+ return ret;
}
ret = qcom_pcie_ep_enable_irq_resources(pdev, pcie_ep);
if (ret)
- goto err_disable_resources;
+ goto err_ep_deinit;
name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node);
if (!name) {
@@ -778,13 +903,13 @@ err_disable_irqs:
disable_irq(pcie_ep->global_irq);
disable_irq(pcie_ep->perst_irq);
-err_disable_resources:
- qcom_pcie_disable_resources(pcie_ep);
+err_ep_deinit:
+ dw_pcie_ep_deinit(&pcie_ep->pci.ep);
return ret;
}
-static int qcom_pcie_ep_remove(struct platform_device *pdev)
+static void qcom_pcie_ep_remove(struct platform_device *pdev)
{
struct qcom_pcie_ep *pcie_ep = platform_get_drvdata(pdev);
@@ -794,16 +919,22 @@ static int qcom_pcie_ep_remove(struct platform_device *pdev)
debugfs_remove_recursive(pcie_ep->debugfs);
if (pcie_ep->link_status == QCOM_PCIE_EP_LINK_DISABLED)
- return 0;
+ return;
qcom_pcie_disable_resources(pcie_ep);
-
- return 0;
}
+static const struct qcom_pcie_ep_cfg cfg_1_34_0 = {
+ .hdma_support = true,
+ .override_no_snoop = true,
+ .disable_mhi_ram_parity_check = true,
+};
+
static const struct of_device_id qcom_pcie_ep_match[] = {
+	{ .compatible = "qcom,sa8775p-pcie-ep", .data = &cfg_1_34_0 },
+	{ .compatible = "qcom,sar2130p-pcie-ep", },
{ .compatible = "qcom,sdx55-pcie-ep", },
{ .compatible = "qcom,sm8450-pcie-ep", },
{ }
};
MODULE_DEVICE_TABLE(of, qcom_pcie_ep_match);
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index 77e5dc7b88ad..7b92e7a1c0d9 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -10,6 +10,7 @@
#include <linux/clk.h>
#include <linux/crc8.h>
+#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/interconnect.h>
@@ -17,10 +18,13 @@
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
+#include <linux/limits.h>
#include <linux/init.h>
-#include <linux/of_device.h>
-#include <linux/of_gpio.h>
+#include <linux/of.h>
+#include <linux/of_pci.h>
#include <linux/pci.h>
+#include <linux/pci-ecam.h>
+#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/phy/pcie.h>
@@ -29,11 +33,60 @@
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/types.h>
+#include <linux/units.h>
#include "../../pci.h"
+#include "../pci-host-common.h"
#include "pcie-designware.h"
+#include "pcie-qcom-common.h"
-#define PCIE20_PARF_SYS_CTRL 0x00
+/* PARF registers */
+#define PARF_SYS_CTRL 0x00
+#define PARF_PM_CTRL 0x20
+#define PARF_PCS_DEEMPH 0x34
+#define PARF_PCS_SWING 0x38
+#define PARF_PHY_CTRL 0x40
+#define PARF_PHY_REFCLK 0x4c
+#define PARF_CONFIG_BITS 0x50
+#define PARF_DBI_BASE_ADDR 0x168
+#define PARF_SLV_ADDR_SPACE_SIZE 0x16c
+#define PARF_MHI_CLOCK_RESET_CTRL 0x174
+#define PARF_AXI_MSTR_WR_ADDR_HALT 0x178
+#define PARF_AXI_MSTR_WR_ADDR_HALT_V2 0x1a8
+#define PARF_Q2A_FLUSH 0x1ac
+#define PARF_LTSSM 0x1b0
+#define PARF_INT_ALL_STATUS 0x224
+#define PARF_INT_ALL_CLEAR 0x228
+#define PARF_INT_ALL_MASK 0x22c
+#define PARF_SID_OFFSET 0x234
+#define PARF_BDF_TRANSLATE_CFG 0x24c
+#define PARF_DBI_BASE_ADDR_V2 0x350
+#define PARF_DBI_BASE_ADDR_V2_HI 0x354
+#define PARF_SLV_ADDR_SPACE_SIZE_V2 0x358
+#define PARF_SLV_ADDR_SPACE_SIZE_V2_HI 0x35c
+#define PARF_NO_SNOOP_OVERRIDE 0x3d4
+#define PARF_ATU_BASE_ADDR 0x634
+#define PARF_ATU_BASE_ADDR_HI 0x638
+#define PARF_DEVICE_TYPE 0x1000
+#define PARF_BDF_TO_SID_TABLE_N 0x2000
+#define PARF_BDF_TO_SID_CFG 0x2c00
+
+/* ELBI registers */
+#define ELBI_SYS_CTRL 0x04
+
+/* DBI registers */
+#define AXI_MSTR_RESP_COMP_CTRL0 0x818
+#define AXI_MSTR_RESP_COMP_CTRL1 0x81c
+
+/* MHI registers */
+#define PARF_DEBUG_CNT_PM_LINKST_IN_L2 0xc04
+#define PARF_DEBUG_CNT_PM_LINKST_IN_L1 0xc0c
+#define PARF_DEBUG_CNT_PM_LINKST_IN_L0S 0xc10
+#define PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1 0xc84
+#define PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2 0xc88
+
+/* PARF_SYS_CTRL register fields */
+#define MAC_PHY_POWERDOWN_IN_P2_D_MUX_EN BIT(29)
#define MST_WAKEUP_EN BIT(13)
#define SLV_WAKEUP_EN BIT(12)
#define MSTR_ACLK_CGC_DIS BIT(10)
@@ -43,153 +96,138 @@
#define L23_CLK_RMV_DIS BIT(2)
#define L1_CLK_RMV_DIS BIT(1)
-#define PCIE20_PARF_PM_CTRL 0x20
+/* PARF_PM_CTRL register fields */
#define REQ_NOT_ENTR_L1 BIT(5)
-#define PCIE20_PARF_PHY_CTRL 0x40
+/* PARF_PCS_DEEMPH register fields */
+#define PCS_DEEMPH_TX_DEEMPH_GEN1(x) FIELD_PREP(GENMASK(21, 16), x)
+#define PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(x) FIELD_PREP(GENMASK(13, 8), x)
+#define PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(x) FIELD_PREP(GENMASK(5, 0), x)
+
+/* PARF_PCS_SWING register fields */
+#define PCS_SWING_TX_SWING_FULL(x) FIELD_PREP(GENMASK(14, 8), x)
+#define PCS_SWING_TX_SWING_LOW(x) FIELD_PREP(GENMASK(6, 0), x)
+
+/* PARF_PHY_CTRL register fields */
#define PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK GENMASK(20, 16)
-#define PHY_CTRL_PHY_TX0_TERM_OFFSET(x) ((x) << 16)
+#define PHY_CTRL_PHY_TX0_TERM_OFFSET(x) FIELD_PREP(PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK, x)
+#define PHY_TEST_PWR_DOWN BIT(0)
-#define PCIE20_PARF_PHY_REFCLK 0x4C
+/* PARF_PHY_REFCLK register fields */
#define PHY_REFCLK_SSP_EN BIT(16)
#define PHY_REFCLK_USE_PAD BIT(12)
-#define PCIE20_PARF_DBI_BASE_ADDR 0x168
-#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x16C
-#define PCIE20_PARF_MHI_CLOCK_RESET_CTRL 0x174
+/* PARF_CONFIG_BITS register fields */
+#define PHY_RX0_EQ(x) FIELD_PREP(GENMASK(26, 24), x)
+
+/* PARF_SLV_ADDR_SPACE_SIZE register value */
+#define SLV_ADDR_SPACE_SZ 0x80000000
+
+/* PARF_MHI_CLOCK_RESET_CTRL register fields */
#define AHB_CLK_EN BIT(0)
#define MSTR_AXI_CLK_EN BIT(1)
#define BYPASS BIT(4)
-#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT 0x178
-#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2 0x1A8
-#define PCIE20_PARF_LTSSM 0x1B0
-#define PCIE20_PARF_SID_OFFSET 0x234
-#define PCIE20_PARF_BDF_TRANSLATE_CFG 0x24C
-#define PCIE20_PARF_DEVICE_TYPE 0x1000
-#define PCIE20_PARF_BDF_TO_SID_TABLE_N 0x2000
+/* PARF_AXI_MSTR_WR_ADDR_HALT register fields */
+#define EN BIT(31)
+
+/* PARF_LTSSM register fields */
+#define LTSSM_EN BIT(8)
-#define PCIE20_ELBI_SYS_CTRL 0x04
-#define PCIE20_ELBI_SYS_CTRL_LT_ENABLE BIT(0)
+/* PARF_INT_ALL_{STATUS/CLEAR/MASK} register fields */
+#define PARF_INT_ALL_LINK_UP BIT(13)
+#define PARF_INT_MSI_DEV_0_7 GENMASK(30, 23)
-#define PCIE20_AXI_MSTR_RESP_COMP_CTRL0 0x818
+/* PARF_NO_SNOOP_OVERRIDE register fields */
+#define WR_NO_SNOOP_OVERRIDE_EN BIT(1)
+#define RD_NO_SNOOP_OVERRIDE_EN BIT(3)
+
+/* PARF_DEVICE_TYPE register fields */
+#define DEVICE_TYPE_RC 0x4
+
+/* PARF_BDF_TO_SID_CFG fields */
+#define BDF_TO_SID_BYPASS BIT(0)
+
+/* ELBI_SYS_CTRL register fields */
+#define ELBI_SYS_CTRL_LT_ENABLE BIT(0)
+
+/* AXI_MSTR_RESP_COMP_CTRL0 register fields */
#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K 0x4
#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_4K 0x5
-#define PCIE20_AXI_MSTR_RESP_COMP_CTRL1 0x81c
+
+/* AXI_MSTR_RESP_COMP_CTRL1 register fields */
#define CFG_BRIDGE_SB_INIT BIT(0)
-#define PCIE_CAP_SLOT_POWER_LIMIT_VAL FIELD_PREP(PCI_EXP_SLTCAP_SPLV, \
- 250)
-#define PCIE_CAP_SLOT_POWER_LIMIT_SCALE FIELD_PREP(PCI_EXP_SLTCAP_SPLS, \
- 1)
+/* PCI_EXP_SLTCAP register fields */
+#define PCIE_CAP_SLOT_POWER_LIMIT_VAL FIELD_PREP(PCI_EXP_SLTCAP_SPLV, 250)
+#define PCIE_CAP_SLOT_POWER_LIMIT_SCALE FIELD_PREP(PCI_EXP_SLTCAP_SPLS, 1)
#define PCIE_CAP_SLOT_VAL (PCI_EXP_SLTCAP_ABP | \
PCI_EXP_SLTCAP_PCP | \
PCI_EXP_SLTCAP_MRLSP | \
PCI_EXP_SLTCAP_AIP | \
PCI_EXP_SLTCAP_PIP | \
PCI_EXP_SLTCAP_HPS | \
- PCI_EXP_SLTCAP_HPC | \
PCI_EXP_SLTCAP_EIP | \
PCIE_CAP_SLOT_POWER_LIMIT_VAL | \
PCIE_CAP_SLOT_POWER_LIMIT_SCALE)
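
Decoding the slot power limit encoded above: SPLV = 250 combined with SPLS = 1 selects the PCIe-defined 0.1x scale, i.e. a 25.0 W slot power limit. A standalone sketch of the decoding (hypothetical helper, not part of the driver):

	#include <linux/bitfield.h>
	#include <linux/pci_regs.h>

	/* Scale field 0..3 selects 1.0x / 0.1x / 0.01x / 0.001x */
	static u32 demo_sltcap_power_limit_mw(u32 sltcap)
	{
		u32 value = FIELD_GET(PCI_EXP_SLTCAP_SPLV, sltcap);
		u32 scale = FIELD_GET(PCI_EXP_SLTCAP_SPLS, sltcap);
		u32 mw = value * 1000;

		while (scale--)
			mw /= 10;

		return mw; /* SPLV = 250, SPLS = 1 -> 25000 mW */
	}
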
-#define PCIE20_PARF_Q2A_FLUSH 0x1AC
-
-#define PCIE20_MISC_CONTROL_1_REG 0x8BC
-#define DBI_RO_WR_EN 1
-
#define PERST_DELAY_US 1000
-/* PARF registers */
-#define PCIE20_PARF_PCS_DEEMPH 0x34
-#define PCS_DEEMPH_TX_DEEMPH_GEN1(x) ((x) << 16)
-#define PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(x) ((x) << 8)
-#define PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(x) ((x) << 0)
-
-#define PCIE20_PARF_PCS_SWING 0x38
-#define PCS_SWING_TX_SWING_FULL(x) ((x) << 8)
-#define PCS_SWING_TX_SWING_LOW(x) ((x) << 0)
-
-#define PCIE20_PARF_CONFIG_BITS 0x50
-#define PHY_RX0_EQ(x) ((x) << 24)
-
-#define PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE 0x358
-#define SLV_ADDR_SPACE_SZ 0x10000000
-#define PCIE20_LNK_CONTROL2_LINK_STATUS2 0xa0
+#define QCOM_PCIE_CRC8_POLYNOMIAL (BIT(2) | BIT(1) | BIT(0))
-#define DEVICE_TYPE_RC 0x4
-
-#define QCOM_PCIE_2_1_0_MAX_SUPPLY 3
-#define QCOM_PCIE_2_1_0_MAX_CLOCKS 5
-
-#define QCOM_PCIE_CRC8_POLYNOMIAL (BIT(2) | BIT(1) | BIT(0))
-
-struct qcom_pcie_resources_2_1_0 {
- struct clk_bulk_data clks[QCOM_PCIE_2_1_0_MAX_CLOCKS];
- struct reset_control *pci_reset;
- struct reset_control *axi_reset;
- struct reset_control *ahb_reset;
- struct reset_control *por_reset;
- struct reset_control *phy_reset;
- struct reset_control *ext_reset;
- struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY];
-};
+#define QCOM_PCIE_LINK_SPEED_TO_BW(speed) \
+ Mbps_to_icc(PCIE_SPEED2MBS_ENC(pcie_link_speed[speed]))
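
Worked through for a Gen 1 link (LNKSTA speed field of 1), the macro reproduces the bandwidth the old hard-coded table voted with:

	/*
	 * Standalone expansion sketch:
	 *   pcie_link_speed[1]      == PCIE_SPEED_2_5GT
	 *   PCIE_SPEED2MBS_ENC(..)  == 2500 * 8 / 10   == 2000 Mb/s (8b/10b)
	 *   Mbps_to_icc(2000)       == 2000 * 1000 / 8 == 250000 kBps
	 *
	 * i.e. the 250 MBps of the old MBps_to_icc(250) vote; callers
	 * additionally scale by the negotiated link width.
	 */
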
struct qcom_pcie_resources_1_0_0 {
- struct clk *iface;
- struct clk *aux;
- struct clk *master_bus;
- struct clk *slave_bus;
+ struct clk_bulk_data *clks;
+ int num_clks;
struct reset_control *core;
struct regulator *vdda;
};
-#define QCOM_PCIE_2_3_2_MAX_SUPPLY 2
+#define QCOM_PCIE_2_1_0_MAX_RESETS 6
+#define QCOM_PCIE_2_1_0_MAX_SUPPLY 3
+struct qcom_pcie_resources_2_1_0 {
+ struct clk_bulk_data *clks;
+ int num_clks;
+ struct reset_control_bulk_data resets[QCOM_PCIE_2_1_0_MAX_RESETS];
+ int num_resets;
+ struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY];
+};
+
+#define QCOM_PCIE_2_3_2_MAX_SUPPLY 2
struct qcom_pcie_resources_2_3_2 {
- struct clk *aux_clk;
- struct clk *master_clk;
- struct clk *slave_clk;
- struct clk *cfg_clk;
+ struct clk_bulk_data *clks;
+ int num_clks;
struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY];
};
-#define QCOM_PCIE_2_4_0_MAX_CLOCKS 4
-struct qcom_pcie_resources_2_4_0 {
- struct clk_bulk_data clks[QCOM_PCIE_2_4_0_MAX_CLOCKS];
+#define QCOM_PCIE_2_3_3_MAX_RESETS 7
+struct qcom_pcie_resources_2_3_3 {
+ struct clk_bulk_data *clks;
int num_clks;
- struct reset_control *axi_m_reset;
- struct reset_control *axi_s_reset;
- struct reset_control *pipe_reset;
- struct reset_control *axi_m_vmid_reset;
- struct reset_control *axi_s_xpu_reset;
- struct reset_control *parf_reset;
- struct reset_control *phy_reset;
- struct reset_control *axi_m_sticky_reset;
- struct reset_control *pipe_sticky_reset;
- struct reset_control *pwr_reset;
- struct reset_control *ahb_reset;
- struct reset_control *phy_ahb_reset;
+ struct reset_control_bulk_data rst[QCOM_PCIE_2_3_3_MAX_RESETS];
};
-struct qcom_pcie_resources_2_3_3 {
- struct clk *iface;
- struct clk *axi_m_clk;
- struct clk *axi_s_clk;
- struct clk *ahb_clk;
- struct clk *aux_clk;
- struct reset_control *rst[7];
+#define QCOM_PCIE_2_4_0_MAX_RESETS 12
+struct qcom_pcie_resources_2_4_0 {
+ struct clk_bulk_data *clks;
+ int num_clks;
+ struct reset_control_bulk_data resets[QCOM_PCIE_2_4_0_MAX_RESETS];
+ int num_resets;
};
-/* 6 clocks typically, 7 for sm8250 */
+#define QCOM_PCIE_2_7_0_MAX_SUPPLIES 2
struct qcom_pcie_resources_2_7_0 {
- struct clk_bulk_data clks[12];
+ struct clk_bulk_data *clks;
int num_clks;
- struct regulator_bulk_data supplies[2];
- struct reset_control *pci_reset;
+ struct regulator_bulk_data supplies[QCOM_PCIE_2_7_0_MAX_SUPPLIES];
+ struct reset_control *rst;
};
struct qcom_pcie_resources_2_9_0 {
- struct clk_bulk_data clks[5];
+ struct clk_bulk_data *clks;
+ int num_clks;
struct reset_control *rst;
};
@@ -209,46 +247,80 @@ struct qcom_pcie_ops {
int (*get_resources)(struct qcom_pcie *pcie);
int (*init)(struct qcom_pcie *pcie);
int (*post_init)(struct qcom_pcie *pcie);
+ void (*host_post_init)(struct qcom_pcie *pcie);
void (*deinit)(struct qcom_pcie *pcie);
void (*ltssm_enable)(struct qcom_pcie *pcie);
int (*config_sid)(struct qcom_pcie *pcie);
};
+/**
+ * struct qcom_pcie_cfg - Per SoC config struct
+ * @ops: qcom PCIe ops structure
+ * @override_no_snoop: Override NO_SNOOP attribute in TLP to enable cache
+ * snooping
+ * @firmware_managed: Set if the Root Complex is firmware managed
+ * @no_l0s: Set if the link must not advertise ASPM L0s
+ */
struct qcom_pcie_cfg {
const struct qcom_pcie_ops *ops;
+ bool override_no_snoop;
+ bool firmware_managed;
+ bool no_l0s;
+};
+
+struct qcom_pcie_port {
+ struct list_head list;
+ struct gpio_desc *reset;
+ struct phy *phy;
};
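
Each root port now carries its own PERST# GPIO and PHY. A minimal sketch of how a probe path could populate the list (hypothetical helper; the actual port parsing is outside this hunk):

	static int demo_add_port(struct qcom_pcie *pcie, struct gpio_desc *reset,
				 struct phy *phy)
	{
		struct qcom_pcie_port *port;

		port = devm_kzalloc(pcie->pci->dev, sizeof(*port), GFP_KERNEL);
		if (!port)
			return -ENOMEM;

		port->reset = reset;
		port->phy = phy;
		list_add_tail(&port->list, &pcie->ports);

		return 0;
	}
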
struct qcom_pcie {
struct dw_pcie *pci;
void __iomem *parf; /* DT parf */
- void __iomem *elbi; /* DT elbi */
+ void __iomem *mhi;
union qcom_pcie_resources res;
- struct phy *phy;
- struct gpio_desc *reset;
struct icc_path *icc_mem;
+ struct icc_path *icc_cpu;
const struct qcom_pcie_cfg *cfg;
+ struct dentry *debugfs;
+ struct list_head ports;
+ bool suspended;
+ bool use_pm_opp;
};
#define to_qcom_pcie(x) dev_get_drvdata((x)->dev)
-static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
+static void qcom_perst_assert(struct qcom_pcie *pcie, bool assert)
{
- gpiod_set_value_cansleep(pcie->reset, 1);
+ struct qcom_pcie_port *port;
+ int val = assert ? 1 : 0;
+
+ list_for_each_entry(port, &pcie->ports, list)
+ gpiod_set_value_cansleep(port->reset, val);
+
usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}
+static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
+{
+ qcom_perst_assert(pcie, true);
+}
+
static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
{
/* Ensure that PERST has been asserted for at least 100 ms */
- msleep(100);
- gpiod_set_value_cansleep(pcie->reset, 0);
- usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
+ msleep(PCIE_T_PVPERL_MS);
+ qcom_perst_assert(pcie, false);
}
static int qcom_pcie_start_link(struct dw_pcie *pci)
{
struct qcom_pcie *pcie = to_qcom_pcie(pci);
+ qcom_pcie_common_set_equalization(pci);
+
+ if (pcie_link_speed[pci->max_link_speed] == PCIE_SPEED_16_0GT)
+ qcom_pcie_common_set_16gt_lane_margining(pci);
+
/* Enable Link Training state machine */
if (pcie->cfg->ops->ltssm_enable)
pcie->cfg->ops->ltssm_enable(pcie);
@@ -256,14 +328,97 @@ static int qcom_pcie_start_link(struct dw_pcie *pci)
return 0;
}
+static void qcom_pcie_clear_aspm_l0s(struct dw_pcie *pci)
+{
+ struct qcom_pcie *pcie = to_qcom_pcie(pci);
+ u16 offset;
+ u32 val;
+
+ if (!pcie->cfg->no_l0s)
+ return;
+
+ offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+
+ dw_pcie_dbi_ro_wr_en(pci);
+
+ val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
+ val &= ~PCI_EXP_LNKCAP_ASPM_L0S;
+ writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP);
+
+ dw_pcie_dbi_ro_wr_dis(pci);
+}
+
+static void qcom_pcie_clear_hpc(struct dw_pcie *pci)
+{
+ u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ u32 val;
+
+ dw_pcie_dbi_ro_wr_en(pci);
+
+ val = readl(pci->dbi_base + offset + PCI_EXP_SLTCAP);
+ val &= ~PCI_EXP_SLTCAP_HPC;
+ writel(val, pci->dbi_base + offset + PCI_EXP_SLTCAP);
+
+ dw_pcie_dbi_ro_wr_dis(pci);
+}
+
+static void qcom_pcie_configure_dbi_base(struct qcom_pcie *pcie)
+{
+ struct dw_pcie *pci = pcie->pci;
+
+ if (pci->dbi_phys_addr) {
+ /*
+		 * The PARF_DBI_BASE_ADDR register is in the CPU domain and
+		 * must be programmed with the CPU physical address.
+ */
+ writel(lower_32_bits(pci->dbi_phys_addr), pcie->parf +
+ PARF_DBI_BASE_ADDR);
+ writel(SLV_ADDR_SPACE_SZ, pcie->parf +
+ PARF_SLV_ADDR_SPACE_SIZE);
+ }
+}
+
+static void qcom_pcie_configure_dbi_atu_base(struct qcom_pcie *pcie)
+{
+ struct dw_pcie *pci = pcie->pci;
+
+ if (pci->dbi_phys_addr) {
+ /*
+		 * The PARF_DBI_BASE_ADDR_V2 and PARF_ATU_BASE_ADDR registers
+		 * are in the CPU domain and must be programmed with CPU
+		 * physical addresses.
+ */
+ writel(lower_32_bits(pci->dbi_phys_addr), pcie->parf +
+ PARF_DBI_BASE_ADDR_V2);
+ writel(upper_32_bits(pci->dbi_phys_addr), pcie->parf +
+ PARF_DBI_BASE_ADDR_V2_HI);
+
+ if (pci->atu_phys_addr) {
+ writel(lower_32_bits(pci->atu_phys_addr), pcie->parf +
+ PARF_ATU_BASE_ADDR);
+ writel(upper_32_bits(pci->atu_phys_addr), pcie->parf +
+ PARF_ATU_BASE_ADDR_HI);
+ }
+
+ writel(0x0, pcie->parf + PARF_SLV_ADDR_SPACE_SIZE_V2);
+ writel(SLV_ADDR_SPACE_SZ, pcie->parf +
+ PARF_SLV_ADDR_SPACE_SIZE_V2_HI);
+ }
+}
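
Both helpers program lo/hi register pairs from a 64-bit CPU physical address, split with lower_32_bits()/upper_32_bits(). The pattern in isolation (hypothetical helper):

	/* Program a 64-bit address into a lo/hi register pair */
	static void demo_write_addr_pair(void __iomem *lo, void __iomem *hi,
					 u64 addr)
	{
		writel(lower_32_bits(addr), lo); /* addr & 0xffffffff */
		writel(upper_32_bits(addr), hi); /* addr >> 32 */
	}
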
+
static void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie)
{
+ struct dw_pcie *pci = pcie->pci;
u32 val;
+ if (!pci->elbi_base) {
+ dev_err(pci->dev, "ELBI is not present\n");
+ return;
+ }
/* enable link training */
- val = readl(pcie->elbi + PCIE20_ELBI_SYS_CTRL);
- val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE;
- writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL);
+ val = readl(pci->elbi_base + ELBI_SYS_CTRL);
+ val |= ELBI_SYS_CTRL_LT_ENABLE;
+ writel(val, pci->elbi_base + ELBI_SYS_CTRL);
}
static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
@@ -271,6 +426,7 @@ static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
struct dw_pcie *pci = pcie->pci;
struct device *dev = pci->dev;
+ bool is_apq = of_device_is_compatible(dev->of_node, "qcom,pcie-apq8064");
int ret;
res->supplies[0].supply = "vdda";
@@ -281,59 +437,36 @@ static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
if (ret)
return ret;
- res->clks[0].id = "iface";
- res->clks[1].id = "core";
- res->clks[2].id = "phy";
- res->clks[3].id = "aux";
- res->clks[4].id = "ref";
+ res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
+ if (res->num_clks < 0) {
+ dev_err(dev, "Failed to get clocks\n");
+ return res->num_clks;
+ }
- /* iface, core, phy are required */
- ret = devm_clk_bulk_get(dev, 3, res->clks);
- if (ret < 0)
- return ret;
+ res->resets[0].id = "pci";
+ res->resets[1].id = "axi";
+ res->resets[2].id = "ahb";
+ res->resets[3].id = "por";
+ res->resets[4].id = "phy";
+ res->resets[5].id = "ext";
- /* aux, ref are optional */
- ret = devm_clk_bulk_get_optional(dev, 2, res->clks + 3);
+	/* ext is not present on APQ8064 */
+ res->num_resets = is_apq ? 5 : 6;
+ ret = devm_reset_control_bulk_get_exclusive(dev, res->num_resets, res->resets);
if (ret < 0)
return ret;
- res->pci_reset = devm_reset_control_get_exclusive(dev, "pci");
- if (IS_ERR(res->pci_reset))
- return PTR_ERR(res->pci_reset);
-
- res->axi_reset = devm_reset_control_get_exclusive(dev, "axi");
- if (IS_ERR(res->axi_reset))
- return PTR_ERR(res->axi_reset);
-
- res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb");
- if (IS_ERR(res->ahb_reset))
- return PTR_ERR(res->ahb_reset);
-
- res->por_reset = devm_reset_control_get_exclusive(dev, "por");
- if (IS_ERR(res->por_reset))
- return PTR_ERR(res->por_reset);
-
- res->ext_reset = devm_reset_control_get_optional_exclusive(dev, "ext");
- if (IS_ERR(res->ext_reset))
- return PTR_ERR(res->ext_reset);
-
- res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
- return PTR_ERR_OR_ZERO(res->phy_reset);
+ return 0;
}
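
The per-clock and per-reset devm getters collapse into the bulk APIs: devm_clk_bulk_get_all() fetches every clock listed in DT and returns the count, while the reset bulk calls operate on the whole array in one go. A minimal standalone consumer of the same pattern (all names hypothetical):

	#include <linux/clk.h>
	#include <linux/device.h>
	#include <linux/reset.h>

	static int demo_enable_resources(struct device *dev)
	{
		struct reset_control_bulk_data resets[2] = {
			{ .id = "ahb" }, { .id = "axi" },
		};
		struct clk_bulk_data *clks;
		int num_clks, ret;

		/* Grab every clock the device node defines */
		num_clks = devm_clk_bulk_get_all(dev, &clks);
		if (num_clks < 0)
			return num_clks;

		ret = devm_reset_control_bulk_get_exclusive(dev, 2, resets);
		if (ret < 0)
			return ret;

		ret = reset_control_bulk_assert(2, resets);
		if (ret)
			return ret;

		ret = reset_control_bulk_deassert(2, resets);
		if (ret)
			return ret;

		return clk_bulk_prepare_enable(num_clks, clks);
	}
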
static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie)
{
struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
- clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
- reset_control_assert(res->pci_reset);
- reset_control_assert(res->axi_reset);
- reset_control_assert(res->ahb_reset);
- reset_control_assert(res->por_reset);
- reset_control_assert(res->ext_reset);
- reset_control_assert(res->phy_reset);
+ clk_bulk_disable_unprepare(res->num_clks, res->clks);
+ reset_control_bulk_assert(res->num_resets, res->resets);
- writel(1, pcie->parf + PCIE20_PARF_PHY_CTRL);
+ writel(1, pcie->parf + PARF_PHY_CTRL);
regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}
@@ -346,12 +479,11 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
int ret;
	/* Reset the PCIe interface, as U-Boot can leave it in an undefined state */
- reset_control_assert(res->pci_reset);
- reset_control_assert(res->axi_reset);
- reset_control_assert(res->ahb_reset);
- reset_control_assert(res->por_reset);
- reset_control_assert(res->ext_reset);
- reset_control_assert(res->phy_reset);
+ ret = reset_control_bulk_assert(res->num_resets, res->resets);
+ if (ret < 0) {
+ dev_err(dev, "cannot assert resets\n");
+ return ret;
+ }
ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
if (ret < 0) {
@@ -359,58 +491,14 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
return ret;
}
- ret = reset_control_deassert(res->ahb_reset);
- if (ret) {
- dev_err(dev, "cannot deassert ahb reset\n");
- goto err_deassert_ahb;
- }
-
- ret = reset_control_deassert(res->ext_reset);
- if (ret) {
- dev_err(dev, "cannot deassert ext reset\n");
- goto err_deassert_ext;
- }
-
- ret = reset_control_deassert(res->phy_reset);
- if (ret) {
- dev_err(dev, "cannot deassert phy reset\n");
- goto err_deassert_phy;
- }
-
- ret = reset_control_deassert(res->pci_reset);
- if (ret) {
- dev_err(dev, "cannot deassert pci reset\n");
- goto err_deassert_pci;
- }
-
- ret = reset_control_deassert(res->por_reset);
- if (ret) {
- dev_err(dev, "cannot deassert por reset\n");
- goto err_deassert_por;
- }
-
- ret = reset_control_deassert(res->axi_reset);
- if (ret) {
- dev_err(dev, "cannot deassert axi reset\n");
- goto err_deassert_axi;
+ ret = reset_control_bulk_deassert(res->num_resets, res->resets);
+ if (ret < 0) {
+ dev_err(dev, "cannot deassert resets\n");
+ regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
+ return ret;
}
return 0;
-
-err_deassert_axi:
- reset_control_assert(res->por_reset);
-err_deassert_por:
- reset_control_assert(res->pci_reset);
-err_deassert_pci:
- reset_control_assert(res->phy_reset);
-err_deassert_phy:
- reset_control_assert(res->ext_reset);
-err_deassert_ext:
- reset_control_assert(res->ahb_reset);
-err_deassert_ahb:
- regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
-
- return ret;
}
static int qcom_pcie_post_init_2_1_0(struct qcom_pcie *pcie)
@@ -423,11 +511,11 @@ static int qcom_pcie_post_init_2_1_0(struct qcom_pcie *pcie)
int ret;
/* enable PCIe clocks and resets */
- val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
- val &= ~BIT(0);
- writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
+ val = readl(pcie->parf + PARF_PHY_CTRL);
+ val &= ~PHY_TEST_PWR_DOWN;
+ writel(val, pcie->parf + PARF_PHY_CTRL);
- ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
+ ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
if (ret)
return ret;
@@ -436,37 +524,39 @@ static int qcom_pcie_post_init_2_1_0(struct qcom_pcie *pcie)
writel(PCS_DEEMPH_TX_DEEMPH_GEN1(24) |
PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(24) |
PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(34),
- pcie->parf + PCIE20_PARF_PCS_DEEMPH);
+ pcie->parf + PARF_PCS_DEEMPH);
writel(PCS_SWING_TX_SWING_FULL(120) |
PCS_SWING_TX_SWING_LOW(120),
- pcie->parf + PCIE20_PARF_PCS_SWING);
- writel(PHY_RX0_EQ(4), pcie->parf + PCIE20_PARF_CONFIG_BITS);
+ pcie->parf + PARF_PCS_SWING);
+ writel(PHY_RX0_EQ(4), pcie->parf + PARF_CONFIG_BITS);
}
if (of_device_is_compatible(node, "qcom,pcie-ipq8064")) {
/* set TX termination offset */
- val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
+ val = readl(pcie->parf + PARF_PHY_CTRL);
val &= ~PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK;
val |= PHY_CTRL_PHY_TX0_TERM_OFFSET(7);
- writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
+ writel(val, pcie->parf + PARF_PHY_CTRL);
}
/* enable external reference clock */
- val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK);
+ val = readl(pcie->parf + PARF_PHY_REFCLK);
/* USE_PAD is required only for ipq806x */
if (!of_device_is_compatible(node, "qcom,pcie-apq8064"))
val &= ~PHY_REFCLK_USE_PAD;
val |= PHY_REFCLK_SSP_EN;
- writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK);
+ writel(val, pcie->parf + PARF_PHY_REFCLK);
/* wait for clock acquisition */
usleep_range(1000, 1500);
	/* Set the max TLP size to 2K, instead of using the default of 4K */
writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K,
- pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL0);
+ pci->dbi_base + AXI_MSTR_RESP_COMP_CTRL0);
writel(CFG_BRIDGE_SB_INIT,
- pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL1);
+ pci->dbi_base + AXI_MSTR_RESP_COMP_CTRL1);
+
+ qcom_pcie_clear_hpc(pcie->pci);
return 0;
}
@@ -481,21 +571,11 @@ static int qcom_pcie_get_resources_1_0_0(struct qcom_pcie *pcie)
if (IS_ERR(res->vdda))
return PTR_ERR(res->vdda);
- res->iface = devm_clk_get(dev, "iface");
- if (IS_ERR(res->iface))
- return PTR_ERR(res->iface);
-
- res->aux = devm_clk_get(dev, "aux");
- if (IS_ERR(res->aux))
- return PTR_ERR(res->aux);
-
- res->master_bus = devm_clk_get(dev, "master_bus");
- if (IS_ERR(res->master_bus))
- return PTR_ERR(res->master_bus);
-
- res->slave_bus = devm_clk_get(dev, "slave_bus");
- if (IS_ERR(res->slave_bus))
- return PTR_ERR(res->slave_bus);
+ res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
+ if (res->num_clks < 0) {
+ dev_err(dev, "Failed to get clocks\n");
+ return res->num_clks;
+ }
res->core = devm_reset_control_get_exclusive(dev, "core");
return PTR_ERR_OR_ZERO(res->core);
@@ -506,10 +586,7 @@ static void qcom_pcie_deinit_1_0_0(struct qcom_pcie *pcie)
struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
reset_control_assert(res->core);
- clk_disable_unprepare(res->slave_bus);
- clk_disable_unprepare(res->master_bus);
- clk_disable_unprepare(res->iface);
- clk_disable_unprepare(res->aux);
+ clk_bulk_disable_unprepare(res->num_clks, res->clks);
regulator_disable(res->vdda);
}
@@ -526,46 +603,23 @@ static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie)
return ret;
}
- ret = clk_prepare_enable(res->aux);
- if (ret) {
- dev_err(dev, "cannot prepare/enable aux clock\n");
- goto err_res;
- }
-
- ret = clk_prepare_enable(res->iface);
- if (ret) {
- dev_err(dev, "cannot prepare/enable iface clock\n");
- goto err_aux;
- }
-
- ret = clk_prepare_enable(res->master_bus);
- if (ret) {
- dev_err(dev, "cannot prepare/enable master_bus clock\n");
- goto err_iface;
- }
-
- ret = clk_prepare_enable(res->slave_bus);
+ ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
if (ret) {
- dev_err(dev, "cannot prepare/enable slave_bus clock\n");
- goto err_master;
+ dev_err(dev, "cannot prepare/enable clocks\n");
+ goto err_assert_reset;
}
ret = regulator_enable(res->vdda);
if (ret) {
dev_err(dev, "cannot enable vdda regulator\n");
- goto err_slave;
+ goto err_disable_clks;
}
return 0;
-err_slave:
- clk_disable_unprepare(res->slave_bus);
-err_master:
- clk_disable_unprepare(res->master_bus);
-err_iface:
- clk_disable_unprepare(res->iface);
-err_aux:
- clk_disable_unprepare(res->aux);
-err_res:
+
+err_disable_clks:
+ clk_bulk_disable_unprepare(res->num_clks, res->clks);
+err_assert_reset:
reset_control_assert(res->core);
return ret;
@@ -573,16 +627,29 @@ err_res:
static int qcom_pcie_post_init_1_0_0(struct qcom_pcie *pcie)
{
- /* change DBI base address */
- writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
+ qcom_pcie_configure_dbi_base(pcie);
if (IS_ENABLED(CONFIG_PCI_MSI)) {
- u32 val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
+ u32 val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT);
- val |= BIT(31);
- writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
+ val |= EN;
+ writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT);
}
+ qcom_pcie_clear_hpc(pcie->pci);
+
+ return 0;
+}
+
+static int qcom_pcie_assert_perst(struct dw_pcie *pci, bool assert)
+{
+ struct qcom_pcie *pcie = to_qcom_pcie(pci);
+
+ if (assert)
+ qcom_ep_reset_assert(pcie);
+ else
+ qcom_ep_reset_deassert(pcie);
+
return 0;
}
@@ -591,9 +658,9 @@ static void qcom_pcie_2_3_2_ltssm_enable(struct qcom_pcie *pcie)
u32 val;
/* enable link training */
- val = readl(pcie->parf + PCIE20_PARF_LTSSM);
- val |= BIT(8);
- writel(val, pcie->parf + PCIE20_PARF_LTSSM);
+ val = readl(pcie->parf + PARF_LTSSM);
+ val |= LTSSM_EN;
+ writel(val, pcie->parf + PARF_LTSSM);
}
static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie)
@@ -610,21 +677,11 @@ static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie)
if (ret)
return ret;
- res->aux_clk = devm_clk_get(dev, "aux");
- if (IS_ERR(res->aux_clk))
- return PTR_ERR(res->aux_clk);
-
- res->cfg_clk = devm_clk_get(dev, "cfg");
- if (IS_ERR(res->cfg_clk))
- return PTR_ERR(res->cfg_clk);
-
- res->master_clk = devm_clk_get(dev, "bus_master");
- if (IS_ERR(res->master_clk))
- return PTR_ERR(res->master_clk);
-
- res->slave_clk = devm_clk_get(dev, "bus_slave");
- if (IS_ERR(res->slave_clk))
- return PTR_ERR(res->slave_clk);
+ res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
+ if (res->num_clks < 0) {
+ dev_err(dev, "Failed to get clocks\n");
+ return res->num_clks;
+ }
return 0;
}
@@ -633,11 +690,7 @@ static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie)
{
struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
- clk_disable_unprepare(res->slave_clk);
- clk_disable_unprepare(res->master_clk);
- clk_disable_unprepare(res->cfg_clk);
- clk_disable_unprepare(res->aux_clk);
-
+ clk_bulk_disable_unprepare(res->num_clks, res->clks);
regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}
@@ -654,43 +707,14 @@ static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie)
return ret;
}
- ret = clk_prepare_enable(res->aux_clk);
- if (ret) {
- dev_err(dev, "cannot prepare/enable aux clock\n");
- goto err_aux_clk;
- }
-
- ret = clk_prepare_enable(res->cfg_clk);
- if (ret) {
- dev_err(dev, "cannot prepare/enable cfg clock\n");
- goto err_cfg_clk;
- }
-
- ret = clk_prepare_enable(res->master_clk);
- if (ret) {
- dev_err(dev, "cannot prepare/enable master clock\n");
- goto err_master_clk;
- }
-
- ret = clk_prepare_enable(res->slave_clk);
+ ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
if (ret) {
- dev_err(dev, "cannot prepare/enable slave clock\n");
- goto err_slave_clk;
+ dev_err(dev, "cannot prepare/enable clocks\n");
+ regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
+ return ret;
}
return 0;
-
-err_slave_clk:
- clk_disable_unprepare(res->master_clk);
-err_master_clk:
- clk_disable_unprepare(res->cfg_clk);
-err_cfg_clk:
- clk_disable_unprepare(res->aux_clk);
-
-err_aux_clk:
- regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
-
- return ret;
}
static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie)
@@ -698,25 +722,26 @@ static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie)
u32 val;
/* enable PCIe clocks and resets */
- val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
- val &= ~BIT(0);
- writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
+ val = readl(pcie->parf + PARF_PHY_CTRL);
+ val &= ~PHY_TEST_PWR_DOWN;
+ writel(val, pcie->parf + PARF_PHY_CTRL);
- /* change DBI base address */
- writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
+ qcom_pcie_configure_dbi_base(pcie);
/* MAC PHY_POWERDOWN MUX DISABLE */
- val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
- val &= ~BIT(29);
- writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);
+ val = readl(pcie->parf + PARF_SYS_CTRL);
+ val &= ~MAC_PHY_POWERDOWN_IN_P2_D_MUX_EN;
+ writel(val, pcie->parf + PARF_SYS_CTRL);
- val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
- val |= BIT(4);
- writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
+ val = readl(pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);
+ val |= BYPASS;
+ writel(val, pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);
- val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
- val |= BIT(31);
- writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
+ val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);
+ val |= EN;
+ writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);
+
+ qcom_pcie_clear_hpc(pcie->pci);
return 0;
}
@@ -729,77 +754,30 @@ static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie)
bool is_ipq = of_device_is_compatible(dev->of_node, "qcom,pcie-ipq4019");
int ret;
- res->clks[0].id = "aux";
- res->clks[1].id = "master_bus";
- res->clks[2].id = "slave_bus";
- res->clks[3].id = "iface";
-
- /* qcom,pcie-ipq4019 is defined without "iface" */
- res->num_clks = is_ipq ? 3 : 4;
-
- ret = devm_clk_bulk_get(dev, res->num_clks, res->clks);
- if (ret < 0)
- return ret;
-
- res->axi_m_reset = devm_reset_control_get_exclusive(dev, "axi_m");
- if (IS_ERR(res->axi_m_reset))
- return PTR_ERR(res->axi_m_reset);
-
- res->axi_s_reset = devm_reset_control_get_exclusive(dev, "axi_s");
- if (IS_ERR(res->axi_s_reset))
- return PTR_ERR(res->axi_s_reset);
-
- if (is_ipq) {
- /*
- * These resources relates to the PHY or are secure clocks, but
- * are controlled here for IPQ4019
- */
- res->pipe_reset = devm_reset_control_get_exclusive(dev, "pipe");
- if (IS_ERR(res->pipe_reset))
- return PTR_ERR(res->pipe_reset);
-
- res->axi_m_vmid_reset = devm_reset_control_get_exclusive(dev,
- "axi_m_vmid");
- if (IS_ERR(res->axi_m_vmid_reset))
- return PTR_ERR(res->axi_m_vmid_reset);
-
- res->axi_s_xpu_reset = devm_reset_control_get_exclusive(dev,
- "axi_s_xpu");
- if (IS_ERR(res->axi_s_xpu_reset))
- return PTR_ERR(res->axi_s_xpu_reset);
-
- res->parf_reset = devm_reset_control_get_exclusive(dev, "parf");
- if (IS_ERR(res->parf_reset))
- return PTR_ERR(res->parf_reset);
-
- res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
- if (IS_ERR(res->phy_reset))
- return PTR_ERR(res->phy_reset);
+ res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
+ if (res->num_clks < 0) {
+ dev_err(dev, "Failed to get clocks\n");
+ return res->num_clks;
}
- res->axi_m_sticky_reset = devm_reset_control_get_exclusive(dev,
- "axi_m_sticky");
- if (IS_ERR(res->axi_m_sticky_reset))
- return PTR_ERR(res->axi_m_sticky_reset);
-
- res->pipe_sticky_reset = devm_reset_control_get_exclusive(dev,
- "pipe_sticky");
- if (IS_ERR(res->pipe_sticky_reset))
- return PTR_ERR(res->pipe_sticky_reset);
+ res->resets[0].id = "axi_m";
+ res->resets[1].id = "axi_s";
+ res->resets[2].id = "axi_m_sticky";
+ res->resets[3].id = "pipe_sticky";
+ res->resets[4].id = "pwr";
+ res->resets[5].id = "ahb";
+ res->resets[6].id = "pipe";
+ res->resets[7].id = "axi_m_vmid";
+ res->resets[8].id = "axi_s_xpu";
+ res->resets[9].id = "parf";
+ res->resets[10].id = "phy";
+ res->resets[11].id = "phy_ahb";
- res->pwr_reset = devm_reset_control_get_exclusive(dev, "pwr");
- if (IS_ERR(res->pwr_reset))
- return PTR_ERR(res->pwr_reset);
+ res->num_resets = is_ipq ? 12 : 6;
- res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb");
- if (IS_ERR(res->ahb_reset))
- return PTR_ERR(res->ahb_reset);
-
- if (is_ipq) {
- res->phy_ahb_reset = devm_reset_control_get_exclusive(dev, "phy_ahb");
- if (IS_ERR(res->phy_ahb_reset))
- return PTR_ERR(res->phy_ahb_reset);
- }
+ ret = devm_reset_control_bulk_get_exclusive(dev, res->num_resets, res->resets);
+ if (ret < 0)
+ return ret;
return 0;
}
@@ -808,15 +786,7 @@ static void qcom_pcie_deinit_2_4_0(struct qcom_pcie *pcie)
{
struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
- reset_control_assert(res->axi_m_reset);
- reset_control_assert(res->axi_s_reset);
- reset_control_assert(res->pipe_reset);
- reset_control_assert(res->pipe_sticky_reset);
- reset_control_assert(res->phy_reset);
- reset_control_assert(res->phy_ahb_reset);
- reset_control_assert(res->axi_m_sticky_reset);
- reset_control_assert(res->pwr_reset);
- reset_control_assert(res->ahb_reset);
+ reset_control_bulk_assert(res->num_resets, res->resets);
clk_bulk_disable_unprepare(res->num_clks, res->clks);
}
@@ -827,176 +797,28 @@ static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie)
struct device *dev = pci->dev;
int ret;
- ret = reset_control_assert(res->axi_m_reset);
- if (ret) {
- dev_err(dev, "cannot assert axi master reset\n");
- return ret;
- }
-
- ret = reset_control_assert(res->axi_s_reset);
- if (ret) {
- dev_err(dev, "cannot assert axi slave reset\n");
- return ret;
- }
-
- usleep_range(10000, 12000);
-
- ret = reset_control_assert(res->pipe_reset);
- if (ret) {
- dev_err(dev, "cannot assert pipe reset\n");
- return ret;
- }
-
- ret = reset_control_assert(res->pipe_sticky_reset);
- if (ret) {
- dev_err(dev, "cannot assert pipe sticky reset\n");
- return ret;
- }
-
- ret = reset_control_assert(res->phy_reset);
- if (ret) {
- dev_err(dev, "cannot assert phy reset\n");
- return ret;
- }
-
- ret = reset_control_assert(res->phy_ahb_reset);
- if (ret) {
- dev_err(dev, "cannot assert phy ahb reset\n");
+ ret = reset_control_bulk_assert(res->num_resets, res->resets);
+ if (ret < 0) {
+ dev_err(dev, "cannot assert resets\n");
return ret;
}
usleep_range(10000, 12000);
- ret = reset_control_assert(res->axi_m_sticky_reset);
- if (ret) {
- dev_err(dev, "cannot assert axi master sticky reset\n");
- return ret;
- }
-
- ret = reset_control_assert(res->pwr_reset);
- if (ret) {
- dev_err(dev, "cannot assert power reset\n");
- return ret;
- }
-
- ret = reset_control_assert(res->ahb_reset);
- if (ret) {
- dev_err(dev, "cannot assert ahb reset\n");
+ ret = reset_control_bulk_deassert(res->num_resets, res->resets);
+ if (ret < 0) {
+ dev_err(dev, "cannot deassert resets\n");
return ret;
}
usleep_range(10000, 12000);
- ret = reset_control_deassert(res->phy_ahb_reset);
+ ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
if (ret) {
- dev_err(dev, "cannot deassert phy ahb reset\n");
+ reset_control_bulk_assert(res->num_resets, res->resets);
return ret;
}
- ret = reset_control_deassert(res->phy_reset);
- if (ret) {
- dev_err(dev, "cannot deassert phy reset\n");
- goto err_rst_phy;
- }
-
- ret = reset_control_deassert(res->pipe_reset);
- if (ret) {
- dev_err(dev, "cannot deassert pipe reset\n");
- goto err_rst_pipe;
- }
-
- ret = reset_control_deassert(res->pipe_sticky_reset);
- if (ret) {
- dev_err(dev, "cannot deassert pipe sticky reset\n");
- goto err_rst_pipe_sticky;
- }
-
- usleep_range(10000, 12000);
-
- ret = reset_control_deassert(res->axi_m_reset);
- if (ret) {
- dev_err(dev, "cannot deassert axi master reset\n");
- goto err_rst_axi_m;
- }
-
- ret = reset_control_deassert(res->axi_m_sticky_reset);
- if (ret) {
- dev_err(dev, "cannot deassert axi master sticky reset\n");
- goto err_rst_axi_m_sticky;
- }
-
- ret = reset_control_deassert(res->axi_s_reset);
- if (ret) {
- dev_err(dev, "cannot deassert axi slave reset\n");
- goto err_rst_axi_s;
- }
-
- ret = reset_control_deassert(res->pwr_reset);
- if (ret) {
- dev_err(dev, "cannot deassert power reset\n");
- goto err_rst_pwr;
- }
-
- ret = reset_control_deassert(res->ahb_reset);
- if (ret) {
- dev_err(dev, "cannot deassert ahb reset\n");
- goto err_rst_ahb;
- }
-
- usleep_range(10000, 12000);
-
- ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
- if (ret)
- goto err_clks;
-
- return 0;
-
-err_clks:
- reset_control_assert(res->ahb_reset);
-err_rst_ahb:
- reset_control_assert(res->pwr_reset);
-err_rst_pwr:
- reset_control_assert(res->axi_s_reset);
-err_rst_axi_s:
- reset_control_assert(res->axi_m_sticky_reset);
-err_rst_axi_m_sticky:
- reset_control_assert(res->axi_m_reset);
-err_rst_axi_m:
- reset_control_assert(res->pipe_sticky_reset);
-err_rst_pipe_sticky:
- reset_control_assert(res->pipe_reset);
-err_rst_pipe:
- reset_control_assert(res->phy_reset);
-err_rst_phy:
- reset_control_assert(res->phy_ahb_reset);
- return ret;
-}
-
-static int qcom_pcie_post_init_2_4_0(struct qcom_pcie *pcie)
-{
- u32 val;
-
- /* enable PCIe clocks and resets */
- val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
- val &= ~BIT(0);
- writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
-
- /* change DBI base address */
- writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
-
- /* MAC PHY_POWERDOWN MUX DISABLE */
- val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
- val &= ~BIT(29);
- writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);
-
- val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
- val |= BIT(4);
- writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
-
- val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
- val |= BIT(31);
- writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
-
return 0;
}
@@ -1005,37 +827,26 @@ static int qcom_pcie_get_resources_2_3_3(struct qcom_pcie *pcie)
struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
struct dw_pcie *pci = pcie->pci;
struct device *dev = pci->dev;
- int i;
- const char *rst_names[] = { "axi_m", "axi_s", "pipe",
- "axi_m_sticky", "sticky",
- "ahb", "sleep", };
-
- res->iface = devm_clk_get(dev, "iface");
- if (IS_ERR(res->iface))
- return PTR_ERR(res->iface);
-
- res->axi_m_clk = devm_clk_get(dev, "axi_m");
- if (IS_ERR(res->axi_m_clk))
- return PTR_ERR(res->axi_m_clk);
-
- res->axi_s_clk = devm_clk_get(dev, "axi_s");
- if (IS_ERR(res->axi_s_clk))
- return PTR_ERR(res->axi_s_clk);
-
- res->ahb_clk = devm_clk_get(dev, "ahb");
- if (IS_ERR(res->ahb_clk))
- return PTR_ERR(res->ahb_clk);
-
- res->aux_clk = devm_clk_get(dev, "aux");
- if (IS_ERR(res->aux_clk))
- return PTR_ERR(res->aux_clk);
-
- for (i = 0; i < ARRAY_SIZE(rst_names); i++) {
- res->rst[i] = devm_reset_control_get(dev, rst_names[i]);
- if (IS_ERR(res->rst[i]))
- return PTR_ERR(res->rst[i]);
+ int ret;
+
+ res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
+ if (res->num_clks < 0) {
+ dev_err(dev, "Failed to get clocks\n");
+ return res->num_clks;
}
+ res->rst[0].id = "axi_m";
+ res->rst[1].id = "axi_s";
+ res->rst[2].id = "pipe";
+ res->rst[3].id = "axi_m_sticky";
+ res->rst[4].id = "sticky";
+ res->rst[5].id = "ahb";
+ res->rst[6].id = "sleep";
+
+ ret = devm_reset_control_bulk_get_exclusive(dev, ARRAY_SIZE(res->rst), res->rst);
+ if (ret < 0)
+ return ret;
+
return 0;
}
@@ -1043,11 +854,7 @@ static void qcom_pcie_deinit_2_3_3(struct qcom_pcie *pcie)
{
struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
- clk_disable_unprepare(res->iface);
- clk_disable_unprepare(res->axi_m_clk);
- clk_disable_unprepare(res->axi_s_clk);
- clk_disable_unprepare(res->ahb_clk);
- clk_disable_unprepare(res->aux_clk);
+ clk_bulk_disable_unprepare(res->num_clks, res->clks);
}
static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
@@ -1055,25 +862,20 @@ static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
struct dw_pcie *pci = pcie->pci;
struct device *dev = pci->dev;
- int i, ret;
+ int ret;
- for (i = 0; i < ARRAY_SIZE(res->rst); i++) {
- ret = reset_control_assert(res->rst[i]);
- if (ret) {
- dev_err(dev, "reset #%d assert failed (%d)\n", i, ret);
- return ret;
- }
+ ret = reset_control_bulk_assert(ARRAY_SIZE(res->rst), res->rst);
+ if (ret < 0) {
+ dev_err(dev, "cannot assert resets\n");
+ return ret;
}
usleep_range(2000, 2500);
- for (i = 0; i < ARRAY_SIZE(res->rst); i++) {
- ret = reset_control_deassert(res->rst[i]);
- if (ret) {
- dev_err(dev, "reset #%d deassert failed (%d)\n", i,
- ret);
- return ret;
- }
+ ret = reset_control_bulk_deassert(ARRAY_SIZE(res->rst), res->rst);
+ if (ret < 0) {
+ dev_err(dev, "cannot deassert resets\n");
+ return ret;
}
/*
@@ -1082,53 +884,20 @@ static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
*/
usleep_range(2000, 2500);
- ret = clk_prepare_enable(res->iface);
- if (ret) {
- dev_err(dev, "cannot prepare/enable core clock\n");
- goto err_clk_iface;
- }
-
- ret = clk_prepare_enable(res->axi_m_clk);
- if (ret) {
- dev_err(dev, "cannot prepare/enable core clock\n");
- goto err_clk_axi_m;
- }
-
- ret = clk_prepare_enable(res->axi_s_clk);
- if (ret) {
- dev_err(dev, "cannot prepare/enable axi slave clock\n");
- goto err_clk_axi_s;
- }
-
- ret = clk_prepare_enable(res->ahb_clk);
- if (ret) {
- dev_err(dev, "cannot prepare/enable ahb clock\n");
- goto err_clk_ahb;
- }
-
- ret = clk_prepare_enable(res->aux_clk);
+ ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
if (ret) {
- dev_err(dev, "cannot prepare/enable aux clock\n");
- goto err_clk_aux;
+ dev_err(dev, "cannot prepare/enable clocks\n");
+ goto err_assert_resets;
}
return 0;
-err_clk_aux:
- clk_disable_unprepare(res->ahb_clk);
-err_clk_ahb:
- clk_disable_unprepare(res->axi_s_clk);
-err_clk_axi_s:
- clk_disable_unprepare(res->axi_m_clk);
-err_clk_axi_m:
- clk_disable_unprepare(res->iface);
-err_clk_iface:
+err_assert_resets:
/*
	 * Deliberately not checking for failure; the original
	 * failure is returned in 'ret' either way.
*/
- for (i = 0; i < ARRAY_SIZE(res->rst); i++)
- reset_control_assert(res->rst[i]);
+ reset_control_bulk_assert(ARRAY_SIZE(res->rst), res->rst);
return ret;
}
@@ -1139,23 +908,22 @@ static int qcom_pcie_post_init_2_3_3(struct qcom_pcie *pcie)
u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
u32 val;
- writel(SLV_ADDR_SPACE_SZ,
- pcie->parf + PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE);
-
- val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
- val &= ~BIT(0);
- writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
+ val = readl(pcie->parf + PARF_PHY_CTRL);
+ val &= ~PHY_TEST_PWR_DOWN;
+ writel(val, pcie->parf + PARF_PHY_CTRL);
- writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
+ qcom_pcie_configure_dbi_atu_base(pcie);
writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS
| SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS,
- pcie->parf + PCIE20_PARF_SYS_CTRL);
- writel(0, pcie->parf + PCIE20_PARF_Q2A_FLUSH);
+ pcie->parf + PARF_SYS_CTRL);
+ writel(0, pcie->parf + PARF_Q2A_FLUSH);
writel(PCI_COMMAND_MASTER, pci->dbi_base + PCI_COMMAND);
- writel(DBI_RO_WR_EN, pci->dbi_base + PCIE20_MISC_CONTROL_1_REG);
+
+ dw_pcie_dbi_ro_wr_en(pci);
+
writel(PCIE_CAP_SLOT_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP);
val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
@@ -1165,6 +933,8 @@ static int qcom_pcie_post_init_2_3_3(struct qcom_pcie *pcie)
writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset +
PCI_EXP_DEVCTL2);
+ dw_pcie_dbi_ro_wr_dis(pci);
+
return 0;
}
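
The raw DBI_RO_WR_EN write to MISC_CONTROL_1 (0x8bc) gives way to the DesignWare helpers, which bracket updates to read-only config registers. The pattern in isolation (hypothetical helper, driver context assumed):

	static void demo_fixup_sltcap(struct dw_pcie *pci, u16 offset, u32 sltcap)
	{
		dw_pcie_dbi_ro_wr_en(pci);	/* set DBI_RO_WR_EN */
		writel(sltcap, pci->dbi_base + offset + PCI_EXP_SLTCAP);
		dw_pcie_dbi_ro_wr_dis(pci);	/* restore read-only behavior */
	}
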
@@ -1173,13 +943,11 @@ static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)
struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
struct dw_pcie *pci = pcie->pci;
struct device *dev = pci->dev;
- unsigned int num_clks, num_opt_clks;
- unsigned int idx;
int ret;
- res->pci_reset = devm_reset_control_get_exclusive(dev, "pci");
- if (IS_ERR(res->pci_reset))
- return PTR_ERR(res->pci_reset);
+ res->rst = devm_reset_control_array_get_exclusive(dev);
+ if (IS_ERR(res->rst))
+ return PTR_ERR(res->rst);
res->supplies[0].supply = "vdda";
res->supplies[1].supply = "vddpe-3v3";
@@ -1188,33 +956,11 @@ static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)
if (ret)
return ret;
- idx = 0;
- res->clks[idx++].id = "aux";
- res->clks[idx++].id = "cfg";
- res->clks[idx++].id = "bus_master";
- res->clks[idx++].id = "bus_slave";
- res->clks[idx++].id = "slave_q2a";
-
- num_clks = idx;
-
- ret = devm_clk_bulk_get(dev, num_clks, res->clks);
- if (ret < 0)
- return ret;
-
- res->clks[idx++].id = "tbu";
- res->clks[idx++].id = "ddrss_sf_tbu";
- res->clks[idx++].id = "aggre0";
- res->clks[idx++].id = "aggre1";
- res->clks[idx++].id = "noc_aggr_4";
- res->clks[idx++].id = "noc_aggr_south_sf";
- res->clks[idx++].id = "cnoc_qx";
-
- num_opt_clks = idx - num_clks;
- res->num_clks = idx;
-
- ret = devm_clk_bulk_get_optional(dev, num_opt_clks, res->clks + num_clks);
- if (ret < 0)
- return ret;
+ res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
+ if (res->num_clks < 0) {
+ dev_err(dev, "Failed to get clocks\n");
+ return res->num_clks;
+ }
return 0;
}
@@ -1237,17 +983,17 @@ static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)
if (ret < 0)
goto err_disable_regulators;
- ret = reset_control_assert(res->pci_reset);
- if (ret < 0) {
- dev_err(dev, "cannot assert pci reset\n");
+ ret = reset_control_assert(res->rst);
+ if (ret) {
+ dev_err(dev, "reset assert failed (%d)\n", ret);
goto err_disable_clocks;
}
usleep_range(1000, 1500);
- ret = reset_control_deassert(res->pci_reset);
- if (ret < 0) {
- dev_err(dev, "cannot deassert pci reset\n");
+ ret = reset_control_deassert(res->rst);
+ if (ret) {
+ dev_err(dev, "reset deassert failed (%d)\n", ret);
goto err_disable_clocks;
}
@@ -1255,35 +1001,34 @@ static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)
usleep_range(1000, 1500);
/* configure PCIe to RC mode */
- writel(DEVICE_TYPE_RC, pcie->parf + PCIE20_PARF_DEVICE_TYPE);
+ writel(DEVICE_TYPE_RC, pcie->parf + PARF_DEVICE_TYPE);
/* enable PCIe clocks and resets */
- val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
- val &= ~BIT(0);
- writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
+ val = readl(pcie->parf + PARF_PHY_CTRL);
+ val &= ~PHY_TEST_PWR_DOWN;
+ writel(val, pcie->parf + PARF_PHY_CTRL);
- /* change DBI base address */
- writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
+ qcom_pcie_configure_dbi_atu_base(pcie);
/* MAC PHY_POWERDOWN MUX DISABLE */
- val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
- val &= ~BIT(29);
- writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);
+ val = readl(pcie->parf + PARF_SYS_CTRL);
+ val &= ~MAC_PHY_POWERDOWN_IN_P2_D_MUX_EN;
+ writel(val, pcie->parf + PARF_SYS_CTRL);
- val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
- val |= BIT(4);
- writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
+ val = readl(pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);
+ val |= BYPASS;
+ writel(val, pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);
/* Enable L1 and L1SS */
- val = readl(pcie->parf + PCIE20_PARF_PM_CTRL);
+ val = readl(pcie->parf + PARF_PM_CTRL);
val &= ~REQ_NOT_ENTR_L1;
- writel(val, pcie->parf + PCIE20_PARF_PM_CTRL);
+ writel(val, pcie->parf + PARF_PM_CTRL);
- if (IS_ENABLED(CONFIG_PCI_MSI)) {
- val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
- val |= BIT(31);
- writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
- }
+ pci->l1ss_support = true;
+
+ val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);
+ val |= EN;
+ writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);
return 0;
err_disable_clocks:
@@ -1294,6 +1039,39 @@ err_disable_regulators:
return ret;
}
+static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie)
+{
+ const struct qcom_pcie_cfg *pcie_cfg = pcie->cfg;
+
+ if (pcie_cfg->override_no_snoop)
+ writel(WR_NO_SNOOP_OVERRIDE_EN | RD_NO_SNOOP_OVERRIDE_EN,
+ pcie->parf + PARF_NO_SNOOP_OVERRIDE);
+
+ qcom_pcie_clear_aspm_l0s(pcie->pci);
+ qcom_pcie_clear_hpc(pcie->pci);
+
+ return 0;
+}
+
+static int qcom_pcie_enable_aspm(struct pci_dev *pdev, void *userdata)
+{
+ /*
+ * Downstream devices need to be in D0 state before enabling PCI PM
+ * substates.
+ */
+ pci_set_power_state_locked(pdev, PCI_D0);
+ pci_enable_link_state_locked(pdev, PCIE_LINK_STATE_ALL);
+
+ return 0;
+}
+
+static void qcom_pcie_host_post_init_2_7_0(struct qcom_pcie *pcie)
+{
+ struct dw_pcie_rp *pp = &pcie->pci->pp;
+
+ pci_walk_bus(pp->bridge->bus, qcom_pcie_enable_aspm, NULL);
+}
+
static void qcom_pcie_deinit_2_7_0(struct qcom_pcie *pcie)
{
struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
@@ -1303,22 +1081,93 @@ static void qcom_pcie_deinit_2_7_0(struct qcom_pcie *pcie)
regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}
+static int qcom_pcie_config_sid_1_9_0(struct qcom_pcie *pcie)
+{
+ /* iommu map structure */
+ struct {
+ u32 bdf;
+ u32 phandle;
+ u32 smmu_sid;
+ u32 smmu_sid_len;
+ } *map;
+ void __iomem *bdf_to_sid_base = pcie->parf + PARF_BDF_TO_SID_TABLE_N;
+ struct device *dev = pcie->pci->dev;
+ u8 qcom_pcie_crc8_table[CRC8_TABLE_SIZE];
+ int i, nr_map, size = 0;
+ u32 smmu_sid_base;
+ u32 val;
+
+ of_get_property(dev->of_node, "iommu-map", &size);
+ if (!size)
+ return 0;
+
+	/* Enable BDF to SID translation by disabling the default bypass mode */
+ val = readl(pcie->parf + PARF_BDF_TO_SID_CFG);
+ val &= ~BDF_TO_SID_BYPASS;
+ writel(val, pcie->parf + PARF_BDF_TO_SID_CFG);
+
+ map = kzalloc(size, GFP_KERNEL);
+ if (!map)
+ return -ENOMEM;
+
+ of_property_read_u32_array(dev->of_node, "iommu-map", (u32 *)map,
+ size / sizeof(u32));
+
+ nr_map = size / (sizeof(*map));
+
+ crc8_populate_msb(qcom_pcie_crc8_table, QCOM_PCIE_CRC8_POLYNOMIAL);
+
+	/* Registers need to be zeroed out first */
+ memset_io(bdf_to_sid_base, 0, CRC8_TABLE_SIZE * sizeof(u32));
+
+ /* Extract the SMMU SID base from the first entry of iommu-map */
+ smmu_sid_base = map[0].smmu_sid;
+
+ /* Look for an available entry to hold the mapping */
+ for (i = 0; i < nr_map; i++) {
+ __be16 bdf_be = cpu_to_be16(map[i].bdf);
+ u32 val;
+ u8 hash;
+
+ hash = crc8(qcom_pcie_crc8_table, (u8 *)&bdf_be, sizeof(bdf_be), 0);
+
+ val = readl(bdf_to_sid_base + hash * sizeof(u32));
+
+ /* If the register is already populated, look for next available entry */
+ while (val) {
+ u8 current_hash = hash++;
+ u8 next_mask = 0xff;
+
+			/* If the NEXT field is NULL, update it with the next hash */
+ if (!(val & next_mask)) {
+ val |= (u32)hash;
+ writel(val, bdf_to_sid_base + current_hash * sizeof(u32));
+ }
+
+ val = readl(bdf_to_sid_base + hash * sizeof(u32));
+ }
+
+ /* BDF [31:16] | SID [15:8] | NEXT [7:0] */
+ val = map[i].bdf << 16 | (map[i].smmu_sid - smmu_sid_base) << 8 | 0;
+ writel(val, bdf_to_sid_base + hash * sizeof(u32));
+ }
+
+ kfree(map);
+
+ return 0;
+}
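
The BDF-to-SID table is an open-addressed hash: the slot index is the CRC8 (polynomial 0x7, per QCOM_PCIE_CRC8_POLYNOMIAL) of the big-endian BDF, and collisions chain through the NEXT byte of each 32-bit entry. How a single entry is composed, as a standalone sketch (hypothetical helper):

	#include <linux/crc8.h>

	/* Pack one entry: BDF [31:16] | SID offset [15:8] | NEXT [7:0] */
	static u32 demo_bdf_to_sid_entry(const u8 *table, u16 bdf, u8 sid_offset,
					 u8 *hash)
	{
		__be16 bdf_be = cpu_to_be16(bdf);

		*hash = crc8(table, (u8 *)&bdf_be, sizeof(bdf_be), 0);

		/* NEXT stays 0 until a later entry collides with this slot */
		return (u32)bdf << 16 | (u32)sid_offset << 8;
	}
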
+
static int qcom_pcie_get_resources_2_9_0(struct qcom_pcie *pcie)
{
struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
struct dw_pcie *pci = pcie->pci;
struct device *dev = pci->dev;
- int ret;
-
- res->clks[0].id = "iface";
- res->clks[1].id = "axi_m";
- res->clks[2].id = "axi_s";
- res->clks[3].id = "axi_bridge";
- res->clks[4].id = "rchng";
- ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
- if (ret < 0)
- return ret;
+ res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
+ if (res->num_clks < 0) {
+ dev_err(dev, "Failed to get clocks\n");
+ return res->num_clks;
+ }
res->rst = devm_reset_control_array_get_exclusive(dev);
if (IS_ERR(res->rst))
@@ -1331,7 +1180,7 @@ static void qcom_pcie_deinit_2_9_0(struct qcom_pcie *pcie)
{
struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
- clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
+ clk_bulk_disable_unprepare(res->num_clks, res->clks);
}
static int qcom_pcie_init_2_9_0(struct qcom_pcie *pcie)
@@ -1360,7 +1209,7 @@ static int qcom_pcie_init_2_9_0(struct qcom_pcie *pcie)
usleep_range(2000, 2500);
- return clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
+ return clk_bulk_prepare_enable(res->num_clks, res->clks);
}
static int qcom_pcie_post_init_2_9_0(struct qcom_pcie *pcie)
@@ -1370,18 +1219,15 @@ static int qcom_pcie_post_init_2_9_0(struct qcom_pcie *pcie)
u32 val;
int i;
- writel(SLV_ADDR_SPACE_SZ,
- pcie->parf + PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE);
+ val = readl(pcie->parf + PARF_PHY_CTRL);
+ val &= ~PHY_TEST_PWR_DOWN;
+ writel(val, pcie->parf + PARF_PHY_CTRL);
- val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
- val &= ~BIT(0);
- writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
+ qcom_pcie_configure_dbi_atu_base(pcie);
- writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
-
- writel(DEVICE_TYPE_RC, pcie->parf + PCIE20_PARF_DEVICE_TYPE);
+ writel(DEVICE_TYPE_RC, pcie->parf + PARF_DEVICE_TYPE);
writel(BYPASS | MSTR_AXI_CLK_EN | AHB_CLK_EN,
- pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
+ pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);
writel(GEN3_RELATED_OFF_RXEQ_RGRDLESS_RXTS |
GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL,
pci->dbi_base + GEN3_RELATED_OFF);
@@ -1389,11 +1235,12 @@ static int qcom_pcie_post_init_2_9_0(struct qcom_pcie *pcie)
writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS |
SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS,
- pcie->parf + PCIE20_PARF_SYS_CTRL);
+ pcie->parf + PARF_SYS_CTRL);
- writel(0, pcie->parf + PCIE20_PARF_Q2A_FLUSH);
+ writel(0, pcie->parf + PARF_Q2A_FLUSH);
dw_pcie_dbi_ro_wr_en(pci);
+
writel(PCIE_CAP_SLOT_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP);
val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
@@ -1403,88 +1250,47 @@ static int qcom_pcie_post_init_2_9_0(struct qcom_pcie *pcie)
writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset +
PCI_EXP_DEVCTL2);
+ dw_pcie_dbi_ro_wr_dis(pci);
+
for (i = 0; i < 256; i++)
- writel(0, pcie->parf + PCIE20_PARF_BDF_TO_SID_TABLE_N + (4 * i));
+ writel(0, pcie->parf + PARF_BDF_TO_SID_TABLE_N + (4 * i));
return 0;
}
-static int qcom_pcie_link_up(struct dw_pcie *pci)
+static bool qcom_pcie_link_up(struct dw_pcie *pci)
{
u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
u16 val = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA);
- return !!(val & PCI_EXP_LNKSTA_DLLLA);
+ return val & PCI_EXP_LNKSTA_DLLLA;
}
-static int qcom_pcie_config_sid_sm8250(struct qcom_pcie *pcie)
+static void qcom_pcie_phy_power_off(struct qcom_pcie *pcie)
{
- /* iommu map structure */
- struct {
- u32 bdf;
- u32 phandle;
- u32 smmu_sid;
- u32 smmu_sid_len;
- } *map;
- void __iomem *bdf_to_sid_base = pcie->parf + PCIE20_PARF_BDF_TO_SID_TABLE_N;
- struct device *dev = pcie->pci->dev;
- u8 qcom_pcie_crc8_table[CRC8_TABLE_SIZE];
- int i, nr_map, size = 0;
- u32 smmu_sid_base;
-
- of_get_property(dev->of_node, "iommu-map", &size);
- if (!size)
- return 0;
+ struct qcom_pcie_port *port;
- map = kzalloc(size, GFP_KERNEL);
- if (!map)
- return -ENOMEM;
-
- of_property_read_u32_array(dev->of_node,
- "iommu-map", (u32 *)map, size / sizeof(u32));
-
- nr_map = size / (sizeof(*map));
-
- crc8_populate_msb(qcom_pcie_crc8_table, QCOM_PCIE_CRC8_POLYNOMIAL);
-
- /* Registers need to be zero out first */
- memset_io(bdf_to_sid_base, 0, CRC8_TABLE_SIZE * sizeof(u32));
-
- /* Extract the SMMU SID base from the first entry of iommu-map */
- smmu_sid_base = map[0].smmu_sid;
-
- /* Look for an available entry to hold the mapping */
- for (i = 0; i < nr_map; i++) {
- __be16 bdf_be = cpu_to_be16(map[i].bdf);
- u32 val;
- u8 hash;
-
- hash = crc8(qcom_pcie_crc8_table, (u8 *)&bdf_be, sizeof(bdf_be),
- 0);
-
- val = readl(bdf_to_sid_base + hash * sizeof(u32));
+ list_for_each_entry(port, &pcie->ports, list)
+ phy_power_off(port->phy);
+}
- /* If the register is already populated, look for next available entry */
- while (val) {
- u8 current_hash = hash++;
- u8 next_mask = 0xff;
+static int qcom_pcie_phy_power_on(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_port *port;
+ int ret;
- /* If NEXT field is NULL then update it with next hash */
- if (!(val & next_mask)) {
- val |= (u32)hash;
- writel(val, bdf_to_sid_base + current_hash * sizeof(u32));
- }
+ list_for_each_entry(port, &pcie->ports, list) {
+ ret = phy_set_mode_ext(port->phy, PHY_MODE_PCIE, PHY_MODE_PCIE_RC);
+ if (ret)
+ return ret;
- val = readl(bdf_to_sid_base + hash * sizeof(u32));
+ ret = phy_power_on(port->phy);
+ if (ret) {
+ qcom_pcie_phy_power_off(pcie);
+ return ret;
}
-
- /* BDF [31:16] | SID [15:8] | NEXT [7:0] */
- val = map[i].bdf << 16 | (map[i].smmu_sid - smmu_sid_base) << 8 | 0;
- writel(val, bdf_to_sid_base + hash * sizeof(u32));
}
- kfree(map);
-
return 0;
}
@@ -1500,11 +1306,7 @@ static int qcom_pcie_host_init(struct dw_pcie_rp *pp)
if (ret)
return ret;
- ret = phy_set_mode_ext(pcie->phy, PHY_MODE_PCIE, PHY_MODE_PCIE_RC);
- if (ret)
- goto err_deinit;
-
- ret = phy_power_on(pcie->phy);
+ ret = qcom_pcie_phy_power_on(pcie);
if (ret)
goto err_deinit;
@@ -1527,15 +1329,36 @@ static int qcom_pcie_host_init(struct dw_pcie_rp *pp)
err_assert_reset:
qcom_ep_reset_assert(pcie);
err_disable_phy:
- phy_power_off(pcie->phy);
+ qcom_pcie_phy_power_off(pcie);
err_deinit:
pcie->cfg->ops->deinit(pcie);
return ret;
}
+static void qcom_pcie_host_deinit(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct qcom_pcie *pcie = to_qcom_pcie(pci);
+
+ qcom_ep_reset_assert(pcie);
+ qcom_pcie_phy_power_off(pcie);
+ pcie->cfg->ops->deinit(pcie);
+}
+
+static void qcom_pcie_host_post_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct qcom_pcie *pcie = to_qcom_pcie(pci);
+
+ if (pcie->cfg->ops->host_post_init)
+ pcie->cfg->ops->host_post_init(pcie);
+}
+
static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {
- .host_init = qcom_pcie_host_init,
+ .init = qcom_pcie_host_init,
+ .deinit = qcom_pcie_host_deinit,
+ .post_init = qcom_pcie_host_post_init,
};
/* Qcom IP rev.: 2.1.0 Synopsys IP rev.: 4.01a */
@@ -1569,7 +1392,7 @@ static const struct qcom_pcie_ops ops_2_3_2 = {
static const struct qcom_pcie_ops ops_2_4_0 = {
.get_resources = qcom_pcie_get_resources_2_4_0,
.init = qcom_pcie_init_2_4_0,
- .post_init = qcom_pcie_post_init_2_4_0,
+ .post_init = qcom_pcie_post_init_2_3_2,
.deinit = qcom_pcie_deinit_2_4_0,
.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};
@@ -1587,6 +1410,7 @@ static const struct qcom_pcie_ops ops_2_3_3 = {
static const struct qcom_pcie_ops ops_2_7_0 = {
.get_resources = qcom_pcie_get_resources_2_7_0,
.init = qcom_pcie_init_2_7_0,
+ .post_init = qcom_pcie_post_init_2_7_0,
.deinit = qcom_pcie_deinit_2_7_0,
.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};
@@ -1595,9 +1419,21 @@ static const struct qcom_pcie_ops ops_2_7_0 = {
static const struct qcom_pcie_ops ops_1_9_0 = {
.get_resources = qcom_pcie_get_resources_2_7_0,
.init = qcom_pcie_init_2_7_0,
+ .post_init = qcom_pcie_post_init_2_7_0,
+ .host_post_init = qcom_pcie_host_post_init_2_7_0,
+ .deinit = qcom_pcie_deinit_2_7_0,
+ .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
+ .config_sid = qcom_pcie_config_sid_1_9_0,
+};
+
+/* Qcom IP rev.: 1.21.0 Synopsys IP rev.: 5.60a */
+static const struct qcom_pcie_ops ops_1_21_0 = {
+ .get_resources = qcom_pcie_get_resources_2_7_0,
+ .init = qcom_pcie_init_2_7_0,
+ .post_init = qcom_pcie_post_init_2_7_0,
+ .host_post_init = qcom_pcie_host_post_init_2_7_0,
.deinit = qcom_pcie_deinit_2_7_0,
.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
- .config_sid = qcom_pcie_config_sid_sm8250,
};
/* Qcom IP rev.: 2.9.0 Synopsys IP rev.: 5.00a */
@@ -1617,6 +1453,11 @@ static const struct qcom_pcie_cfg cfg_1_9_0 = {
.ops = &ops_1_9_0,
};
+static const struct qcom_pcie_cfg cfg_1_34_0 = {
+ .ops = &ops_1_9_0,
+ .override_no_snoop = true,
+};
+
static const struct qcom_pcie_cfg cfg_2_1_0 = {
.ops = &ops_2_1_0,
};
@@ -1641,9 +1482,19 @@ static const struct qcom_pcie_cfg cfg_2_9_0 = {
.ops = &ops_2_9_0,
};
+static const struct qcom_pcie_cfg cfg_sc8280xp = {
+ .ops = &ops_1_21_0,
+ .no_l0s = true,
+};
+
+static const struct qcom_pcie_cfg cfg_fw_managed = {
+ .firmware_managed = true,
+};
+
static const struct dw_pcie_ops dw_pcie_ops = {
.link_up = qcom_pcie_link_up,
.start_link = qcom_pcie_start_link,
+ .assert_perst = qcom_pcie_assert_perst,
};
static int qcom_pcie_icc_init(struct qcom_pcie *pcie)
@@ -1655,6 +1506,9 @@ static int qcom_pcie_icc_init(struct qcom_pcie *pcie)
if (IS_ERR(pcie->icc_mem))
return PTR_ERR(pcie->icc_mem);
+ pcie->icc_cpu = devm_of_icc_get(pci->dev, "cpu-pcie");
+ if (IS_ERR(pcie->icc_cpu))
+ return PTR_ERR(pcie->icc_cpu);
/*
* Some Qualcomm platforms require interconnect bandwidth constraints
* to be set before enabling interconnect clocks.
@@ -1662,25 +1516,38 @@ static int qcom_pcie_icc_init(struct qcom_pcie *pcie)
* Set an initial peak bandwidth corresponding to single-lane Gen 1
* for the pcie-mem path.
*/
- ret = icc_set_bw(pcie->icc_mem, 0, MBps_to_icc(250));
+ ret = icc_set_bw(pcie->icc_mem, 0, QCOM_PCIE_LINK_SPEED_TO_BW(1));
+ if (ret) {
+ dev_err(pci->dev, "Failed to set bandwidth for PCIe-MEM interconnect path: %d\n",
+ ret);
+ return ret;
+ }
+
+ /*
+ * Since the CPU-PCIe path is only used for activities like register
+ * access of the host controller and endpoint Config/BAR space access,
+	 * the HW team recommends a minimal bandwidth of 1 KBps just to
+	 * keep the path active.
+ */
+ ret = icc_set_bw(pcie->icc_cpu, 0, kBps_to_icc(1));
if (ret) {
- dev_err(pci->dev, "failed to set interconnect bandwidth: %d\n",
+ dev_err(pci->dev, "Failed to set bandwidth for CPU-PCIe interconnect path: %d\n",
ret);
+ icc_set_bw(pcie->icc_mem, 0, 0);
return ret;
}
return 0;
}
-static void qcom_pcie_icc_update(struct qcom_pcie *pcie)
+static void qcom_pcie_icc_opp_update(struct qcom_pcie *pcie)
{
+ u32 offset, status, width, speed;
struct dw_pcie *pci = pcie->pci;
- u32 offset, status, bw;
- int speed, width;
- int ret;
-
- if (!pcie->icc_mem)
- return;
+ struct dev_pm_opp_key key = {};
+ unsigned long freq_kbps;
+ struct dev_pm_opp *opp;
+ int ret, freq_mbps;
offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
status = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA);
@@ -1692,56 +1559,312 @@ static void qcom_pcie_icc_update(struct qcom_pcie *pcie)
speed = FIELD_GET(PCI_EXP_LNKSTA_CLS, status);
width = FIELD_GET(PCI_EXP_LNKSTA_NLW, status);
- switch (speed) {
- case 1:
- bw = MBps_to_icc(250);
- break;
- case 2:
- bw = MBps_to_icc(500);
- break;
- default:
- WARN_ON_ONCE(1);
- fallthrough;
- case 3:
- bw = MBps_to_icc(985);
- break;
+ if (pcie->icc_mem) {
+ ret = icc_set_bw(pcie->icc_mem, 0,
+ width * QCOM_PCIE_LINK_SPEED_TO_BW(speed));
+ if (ret) {
+ dev_err(pci->dev, "Failed to set bandwidth for PCIe-MEM interconnect path: %d\n",
+ ret);
+ }
+ } else if (pcie->use_pm_opp) {
+ freq_mbps = pcie_dev_speed_mbps(pcie_link_speed[speed]);
+ if (freq_mbps < 0)
+ return;
+
+ freq_kbps = freq_mbps * KILO;
+ opp = dev_pm_opp_find_level_exact(pci->dev, speed);
+ if (IS_ERR(opp)) {
+			/* opp-level is not defined; use only the frequency */
+ opp = dev_pm_opp_find_freq_exact(pci->dev, freq_kbps * width,
+ true);
+ } else {
+			/* Drop the reference taken by the opp-level lookup */
+ dev_pm_opp_put(opp);
+
+ key.freq = freq_kbps * width;
+ key.level = speed;
+ key.bw = 0;
+ opp = dev_pm_opp_find_key_exact(pci->dev, &key, true);
+ }
+ if (!IS_ERR(opp)) {
+ ret = dev_pm_opp_set_opp(pci->dev, opp);
+ if (ret)
+ dev_err(pci->dev, "Failed to set OPP for freq (%lu): %d\n",
+ freq_kbps * width, ret);
+ dev_pm_opp_put(opp);
+ }
}
+}
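+
+/*
+ * Worked example for the update above (illustrative numbers): a Gen3 x2
+ * link reports speed = 3 and width = 2 in LNKSTA, so the ICC vote becomes
+ * 2 * QCOM_PCIE_LINK_SPEED_TO_BW(3), i.e. twice the per-lane Gen3
+ * bandwidth (~985 MBps per lane, matching the switch table this code
+ * replaced). The OPP path instead looks up an entry keyed by the same
+ * speed and the width-scaled frequency.
+ */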
- ret = icc_set_bw(pcie->icc_mem, 0, width * bw);
- if (ret) {
- dev_err(pci->dev, "failed to set interconnect bandwidth: %d\n",
- ret);
+static int qcom_pcie_link_transition_count(struct seq_file *s, void *data)
+{
+ struct qcom_pcie *pcie = (struct qcom_pcie *)dev_get_drvdata(s->private);
+
+ seq_printf(s, "L0s transition count: %u\n",
+ readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_PM_LINKST_IN_L0S));
+
+ seq_printf(s, "L1 transition count: %u\n",
+ readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_PM_LINKST_IN_L1));
+
+ seq_printf(s, "L1.1 transition count: %u\n",
+ readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1));
+
+ seq_printf(s, "L1.2 transition count: %u\n",
+ readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2));
+
+ seq_printf(s, "L2 transition count: %u\n",
+ readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_PM_LINKST_IN_L2));
+
+ return 0;
+}
+
+static void qcom_pcie_init_debugfs(struct qcom_pcie *pcie)
+{
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ char *name;
+
+ name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node);
+ if (!name)
+ return;
+
+ pcie->debugfs = debugfs_create_dir(name, NULL);
+ debugfs_create_devm_seqfile(dev, "link_transition_count", pcie->debugfs,
+ qcom_pcie_link_transition_count);
+}
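+
+/*
+ * Usage sketch (the path is illustrative; the debugfs directory is named
+ * after the controller's OF node):
+ *
+ *   # cat /sys/kernel/debug/pcie@1c00000/link_transition_count
+ *   L0s transition count: 4
+ *   L1 transition count: 17
+ *   ...
+ */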
+
+static irqreturn_t qcom_pcie_global_irq_thread(int irq, void *data)
+{
+ struct qcom_pcie *pcie = data;
+ struct dw_pcie_rp *pp = &pcie->pci->pp;
+ struct device *dev = pcie->pci->dev;
+ u32 status = readl_relaxed(pcie->parf + PARF_INT_ALL_STATUS);
+
+ writel_relaxed(status, pcie->parf + PARF_INT_ALL_CLEAR);
+
+ if (FIELD_GET(PARF_INT_ALL_LINK_UP, status)) {
+ msleep(PCIE_RESET_CONFIG_WAIT_MS);
+ dev_dbg(dev, "Received Link up event. Starting enumeration!\n");
+ /* Rescan the bus to enumerate endpoint devices */
+ pci_lock_rescan_remove();
+ pci_rescan_bus(pp->bridge->bus);
+ pci_unlock_rescan_remove();
+
+ qcom_pcie_icc_opp_update(pcie);
+ } else {
+ dev_WARN_ONCE(dev, 1, "Received unknown event. INT_STATUS: 0x%08x\n",
+ status);
}
+
+ return IRQ_HANDLED;
}
-static int qcom_pcie_probe(struct platform_device *pdev)
+static void qcom_pci_free_msi(void *ptr)
{
- struct device *dev = &pdev->dev;
+ struct dw_pcie_rp *pp = (struct dw_pcie_rp *)ptr;
+
+ if (pp && pp->has_msi_ctrl)
+ dw_pcie_free_msi(pp);
+}
+
+static int qcom_pcie_ecam_host_init(struct pci_config_window *cfg)
+{
+ struct device *dev = cfg->parent;
struct dw_pcie_rp *pp;
struct dw_pcie *pci;
- struct qcom_pcie *pcie;
- const struct qcom_pcie_cfg *pcie_cfg;
int ret;
- pcie_cfg = of_device_get_match_data(dev);
- if (!pcie_cfg || !pcie_cfg->ops) {
- dev_err(dev, "Invalid platform data\n");
- return -EINVAL;
+ pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+ if (!pci)
+ return -ENOMEM;
+
+ pci->dev = dev;
+ pp = &pci->pp;
+ pci->dbi_base = cfg->win;
+ pp->num_vectors = MSI_DEF_NUM_VECTORS;
+
+ ret = dw_pcie_msi_host_init(pp);
+ if (ret)
+ return ret;
+
+ pp->has_msi_ctrl = true;
+ dw_pcie_msi_init(pp);
+
+ return devm_add_action_or_reset(dev, qcom_pci_free_msi, pp);
+}
+
+static const struct pci_ecam_ops pci_qcom_ecam_ops = {
+ .init = qcom_pcie_ecam_host_init,
+ .pci_ops = {
+ .map_bus = pci_ecam_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
}
+};
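+
+/*
+ * Background for the firmware-managed path: with standard ECAM (generic
+ * PCI, not specific to this driver), config space for bus B, device D,
+ * function F is a flat window at
+ *
+ *   base + (B << 20) + (D << 15) + (F << 12)
+ *
+ * which is exactly what pci_ecam_map_bus() computes, so no DWC DBI/ATU
+ * setup is needed here.
+ */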
- pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
- if (!pcie)
+static int qcom_pcie_parse_port(struct qcom_pcie *pcie, struct device_node *node)
+{
+ struct device *dev = pcie->pci->dev;
+ struct qcom_pcie_port *port;
+ struct gpio_desc *reset;
+ struct phy *phy;
+ int ret;
+
+ reset = devm_fwnode_gpiod_get(dev, of_fwnode_handle(node),
+ "reset", GPIOD_OUT_HIGH, "PERST#");
+ if (IS_ERR(reset))
+ return PTR_ERR(reset);
+
+ phy = devm_of_phy_get(dev, node, NULL);
+ if (IS_ERR(phy))
+ return PTR_ERR(phy);
+
+ port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
+ if (!port)
return -ENOMEM;
- pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
- if (!pci)
+ ret = phy_init(phy);
+ if (ret)
+ return ret;
+
+ port->reset = reset;
+ port->phy = phy;
+ INIT_LIST_HEAD(&port->list);
+ list_add_tail(&port->list, &pcie->ports);
+
+ return 0;
+}
+
+static int qcom_pcie_parse_ports(struct qcom_pcie *pcie)
+{
+ struct device *dev = pcie->pci->dev;
+ struct qcom_pcie_port *port, *tmp;
+ int ret = -ENOENT;
+
+ for_each_available_child_of_node_scoped(dev->of_node, of_port) {
+ if (!of_node_is_type(of_port, "pci"))
+ continue;
+ ret = qcom_pcie_parse_port(pcie, of_port);
+ if (ret)
+ goto err_port_del;
+ }
+
+ return ret;
+
+err_port_del:
+ list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
+ phy_exit(port->phy);
+ list_del(&port->list);
+ }
+
+ return ret;
+}
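+
+/*
+ * Illustrative DT fragment for the per-Root Port properties parsed above
+ * (controller and phandle names are made up; only the property names
+ * follow from the code):
+ *
+ *	pcie@1c00000 {
+ *		...
+ *		pcie@0 {
+ *			device_type = "pci";
+ *			reset-gpios = <&tlmm 94 GPIO_ACTIVE_LOW>;
+ *			phys = <&pcie0_phy>;
+ *		};
+ *	};
+ */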
+
+static int qcom_pcie_parse_legacy_binding(struct qcom_pcie *pcie)
+{
+ struct device *dev = pcie->pci->dev;
+ struct qcom_pcie_port *port;
+ struct gpio_desc *reset;
+ struct phy *phy;
+ int ret;
+
+ phy = devm_phy_optional_get(dev, "pciephy");
+ if (IS_ERR(phy))
+ return PTR_ERR(phy);
+
+ reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH);
+ if (IS_ERR(reset))
+ return PTR_ERR(reset);
+
+ ret = phy_init(phy);
+ if (ret)
+ return ret;
+
+ port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
+ if (!port)
return -ENOMEM;
+ port->reset = reset;
+ port->phy = phy;
+ INIT_LIST_HEAD(&port->list);
+ list_add_tail(&port->list, &pcie->ports);
+
+ return 0;
+}
+
+static int qcom_pcie_probe(struct platform_device *pdev)
+{
+ const struct qcom_pcie_cfg *pcie_cfg;
+ unsigned long max_freq = ULONG_MAX;
+ struct qcom_pcie_port *port, *tmp;
+ struct device *dev = &pdev->dev;
+ struct dev_pm_opp *opp;
+ struct qcom_pcie *pcie;
+ struct dw_pcie_rp *pp;
+ struct resource *res;
+ struct dw_pcie *pci;
+ int ret, irq;
+ char *name;
+
+ pcie_cfg = of_device_get_match_data(dev);
+ if (!pcie_cfg) {
+ dev_err(dev, "No platform data\n");
+ return -ENODATA;
+ }
+
+ if (!pcie_cfg->firmware_managed && !pcie_cfg->ops) {
+ dev_err(dev, "No platform ops\n");
+ return -ENODATA;
+ }
+
pm_runtime_enable(dev);
ret = pm_runtime_get_sync(dev);
if (ret < 0)
goto err_pm_runtime_put;
+ if (pcie_cfg->firmware_managed) {
+ struct pci_host_bridge *bridge;
+ struct pci_config_window *cfg;
+
+ bridge = devm_pci_alloc_host_bridge(dev, 0);
+ if (!bridge) {
+ ret = -ENOMEM;
+ goto err_pm_runtime_put;
+ }
+
+ /* Parse and map our ECAM configuration space area */
+ cfg = pci_host_common_ecam_create(dev, bridge,
+ &pci_qcom_ecam_ops);
+ if (IS_ERR(cfg)) {
+ ret = PTR_ERR(cfg);
+ goto err_pm_runtime_put;
+ }
+
+ bridge->sysdata = cfg;
+ bridge->ops = (struct pci_ops *)&pci_qcom_ecam_ops.pci_ops;
+ bridge->msi_domain = true;
+
+ ret = pci_host_probe(bridge);
+ if (ret)
+ goto err_pm_runtime_put;
+
+ return 0;
+ }
+
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie) {
+ ret = -ENOMEM;
+ goto err_pm_runtime_put;
+ }
+
+ pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+ if (!pci) {
+ ret = -ENOMEM;
+ goto err_pm_runtime_put;
+ }
+
+ INIT_LIST_HEAD(&pcie->ports);
+
pci->dev = dev;
pci->ops = &dw_pcie_ops;
pp = &pci->pp;
@@ -1750,33 +1873,61 @@ static int qcom_pcie_probe(struct platform_device *pdev)
pcie->cfg = pcie_cfg;
- pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH);
- if (IS_ERR(pcie->reset)) {
- ret = PTR_ERR(pcie->reset);
- goto err_pm_runtime_put;
- }
-
pcie->parf = devm_platform_ioremap_resource_byname(pdev, "parf");
if (IS_ERR(pcie->parf)) {
ret = PTR_ERR(pcie->parf);
goto err_pm_runtime_put;
}
- pcie->elbi = devm_platform_ioremap_resource_byname(pdev, "elbi");
- if (IS_ERR(pcie->elbi)) {
- ret = PTR_ERR(pcie->elbi);
- goto err_pm_runtime_put;
+ /* MHI region is optional */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mhi");
+ if (res) {
+ pcie->mhi = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pcie->mhi)) {
+ ret = PTR_ERR(pcie->mhi);
+ goto err_pm_runtime_put;
+ }
}
- pcie->phy = devm_phy_optional_get(dev, "pciephy");
- if (IS_ERR(pcie->phy)) {
- ret = PTR_ERR(pcie->phy);
+ /* OPP table is optional */
+ ret = devm_pm_opp_of_add_table(dev);
+ if (ret && ret != -ENODEV) {
+ dev_err_probe(dev, ret, "Failed to add OPP table\n");
goto err_pm_runtime_put;
}
- ret = qcom_pcie_icc_init(pcie);
- if (ret)
- goto err_pm_runtime_put;
+ /*
+	 * Before the PCIe link is initialized, vote for the highest OPP in the
+	 * OPP table, so that the maximum voltage corner is requested and the
+	 * link can come up at the maximum supported speed. At the end of
+	 * probe(), the OPP vote is adjusted by qcom_pcie_icc_opp_update().
+ */
+ if (!ret) {
+ opp = dev_pm_opp_find_freq_floor(dev, &max_freq);
+ if (IS_ERR(opp)) {
+ ret = PTR_ERR(opp);
+ dev_err_probe(pci->dev, ret,
+ "Unable to find max freq OPP\n");
+ goto err_pm_runtime_put;
+ } else {
+ ret = dev_pm_opp_set_opp(dev, opp);
+ }
+
+ dev_pm_opp_put(opp);
+ if (ret) {
+ dev_err_probe(pci->dev, ret,
+ "Failed to set OPP for freq %lu\n",
+ max_freq);
+ goto err_pm_runtime_put;
+ }
+
+ pcie->use_pm_opp = true;
+ } else {
+		/* No OPP table available, so vote for bandwidth via ICC instead */
+ ret = qcom_pcie_icc_init(pcie);
+ if (ret)
+ goto err_pm_runtime_put;
+ }
ret = pcie->cfg->ops->get_resources(pcie);
if (ret)
@@ -1784,24 +1935,71 @@ static int qcom_pcie_probe(struct platform_device *pdev)
pp->ops = &qcom_pcie_dw_ops;
- ret = phy_init(pcie->phy);
- if (ret)
- goto err_pm_runtime_put;
+ ret = qcom_pcie_parse_ports(pcie);
+ if (ret) {
+ if (ret != -ENOENT) {
+			dev_err_probe(pci->dev, ret,
+				      "Failed to parse Root Port\n");
+ goto err_pm_runtime_put;
+ }
+
+ /*
+		 * If the required properties are not populated in the Root
+		 * Port node, fall back to the legacy method of parsing the
+		 * Host Bridge node, to maintain DT backwards compatibility.
+ */
+ ret = qcom_pcie_parse_legacy_binding(pcie);
+ if (ret)
+ goto err_pm_runtime_put;
+ }
platform_set_drvdata(pdev, pcie);
+ irq = platform_get_irq_byname_optional(pdev, "global");
+ if (irq > 0)
+ pp->use_linkup_irq = true;
+
ret = dw_pcie_host_init(pp);
if (ret) {
dev_err(dev, "cannot initialize host\n");
goto err_phy_exit;
}
- qcom_pcie_icc_update(pcie);
+ name = devm_kasprintf(dev, GFP_KERNEL, "qcom_pcie_global_irq%d",
+ pci_domain_nr(pp->bridge->bus));
+ if (!name) {
+ ret = -ENOMEM;
+ goto err_host_deinit;
+ }
+
+ if (irq > 0) {
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ qcom_pcie_global_irq_thread,
+ IRQF_ONESHOT, name, pcie);
+ if (ret) {
+ dev_err_probe(&pdev->dev, ret,
+ "Failed to request Global IRQ\n");
+ goto err_host_deinit;
+ }
+
+ writel_relaxed(PARF_INT_ALL_LINK_UP | PARF_INT_MSI_DEV_0_7,
+ pcie->parf + PARF_INT_ALL_MASK);
+ }
+
+ qcom_pcie_icc_opp_update(pcie);
+
+ if (pcie->mhi)
+ qcom_pcie_init_debugfs(pcie);
return 0;
+err_host_deinit:
+ dw_pcie_host_deinit(pp);
err_phy_exit:
- phy_exit(pcie->phy);
+ list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
+ phy_exit(port->phy);
+ list_del(&port->list);
+ }
err_pm_runtime_put:
pm_runtime_put(dev);
pm_runtime_disable(dev);
@@ -1809,25 +2007,124 @@ err_pm_runtime_put:
return ret;
}
+static int qcom_pcie_suspend_noirq(struct device *dev)
+{
+ struct qcom_pcie *pcie;
+ int ret = 0;
+
+ pcie = dev_get_drvdata(dev);
+ if (!pcie)
+ return 0;
+
+ /*
+ * Set minimum bandwidth required to keep data path functional during
+ * suspend.
+ */
+ if (pcie->icc_mem) {
+ ret = icc_set_bw(pcie->icc_mem, 0, kBps_to_icc(1));
+ if (ret) {
+ dev_err(dev,
+ "Failed to set bandwidth for PCIe-MEM interconnect path: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ /*
+ * Turn OFF the resources only for controllers without active PCIe
+ * devices. For controllers with active devices, the resources are kept
+ * ON and the link is expected to be in L0/L1 (sub)states.
+ *
+	 * Turning OFF the resources for controllers with active PCIe devices
+	 * would trigger an access violation at the end of the suspend cycle,
+	 * as the kernel tries to access the PCIe devices' config space to
+	 * mask MSIs.
+	 *
+	 * Also, it is not desirable to put the link into the L2/L3 state, as
+	 * that implies the VDD supply will be removed and the devices may go
+	 * into a powerdown state. This would affect the lifetime of storage
+	 * devices like NVMe.
+ */
+ if (!dw_pcie_link_up(pcie->pci)) {
+ qcom_pcie_host_deinit(&pcie->pci->pp);
+ pcie->suspended = true;
+ }
+
+ /*
+ * Only disable CPU-PCIe interconnect path if the suspend is non-S2RAM.
+	 * Only disable the CPU-PCIe interconnect path if the suspend is not
+	 * S2RAM, because on some platforms DBI access can happen very late
+	 * during S2RAM and an inactive CPU-PCIe interconnect path may lead to
+	 * a NoC error.
+ if (pm_suspend_target_state != PM_SUSPEND_MEM) {
+ ret = icc_disable(pcie->icc_cpu);
+ if (ret)
+ dev_err(dev, "Failed to disable CPU-PCIe interconnect path: %d\n", ret);
+
+ if (pcie->use_pm_opp)
+ dev_pm_opp_set_opp(pcie->pci->dev, NULL);
+ }
+ return ret;
+}
+
+static int qcom_pcie_resume_noirq(struct device *dev)
+{
+ struct qcom_pcie *pcie;
+ int ret;
+
+ pcie = dev_get_drvdata(dev);
+ if (!pcie)
+ return 0;
+
+ if (pm_suspend_target_state != PM_SUSPEND_MEM) {
+ ret = icc_enable(pcie->icc_cpu);
+ if (ret) {
+ dev_err(dev, "Failed to enable CPU-PCIe interconnect path: %d\n", ret);
+ return ret;
+ }
+ }
+
+ if (pcie->suspended) {
+ ret = qcom_pcie_host_init(&pcie->pci->pp);
+ if (ret)
+ return ret;
+
+ pcie->suspended = false;
+ }
+
+ qcom_pcie_icc_opp_update(pcie);
+
+ return 0;
+}
+
static const struct of_device_id qcom_pcie_match[] = {
{ .compatible = "qcom,pcie-apq8064", .data = &cfg_2_1_0 },
{ .compatible = "qcom,pcie-apq8084", .data = &cfg_1_0_0 },
{ .compatible = "qcom,pcie-ipq4019", .data = &cfg_2_4_0 },
+ { .compatible = "qcom,pcie-ipq5018", .data = &cfg_2_9_0 },
{ .compatible = "qcom,pcie-ipq6018", .data = &cfg_2_9_0 },
{ .compatible = "qcom,pcie-ipq8064", .data = &cfg_2_1_0 },
{ .compatible = "qcom,pcie-ipq8064-v2", .data = &cfg_2_1_0 },
{ .compatible = "qcom,pcie-ipq8074", .data = &cfg_2_3_3 },
+ { .compatible = "qcom,pcie-ipq8074-gen3", .data = &cfg_2_9_0 },
+ { .compatible = "qcom,pcie-ipq9574", .data = &cfg_2_9_0 },
{ .compatible = "qcom,pcie-msm8996", .data = &cfg_2_3_2 },
{ .compatible = "qcom,pcie-qcs404", .data = &cfg_2_4_0 },
- { .compatible = "qcom,pcie-sa8540p", .data = &cfg_1_9_0 },
+ { .compatible = "qcom,pcie-sa8255p", .data = &cfg_fw_managed },
+ { .compatible = "qcom,pcie-sa8540p", .data = &cfg_sc8280xp },
+	{ .compatible = "qcom,pcie-sa8775p", .data = &cfg_1_34_0 },
{ .compatible = "qcom,pcie-sc7280", .data = &cfg_1_9_0 },
{ .compatible = "qcom,pcie-sc8180x", .data = &cfg_1_9_0 },
- { .compatible = "qcom,pcie-sc8280xp", .data = &cfg_1_9_0 },
+ { .compatible = "qcom,pcie-sc8280xp", .data = &cfg_sc8280xp },
{ .compatible = "qcom,pcie-sdm845", .data = &cfg_2_7_0 },
+ { .compatible = "qcom,pcie-sdx55", .data = &cfg_1_9_0 },
{ .compatible = "qcom,pcie-sm8150", .data = &cfg_1_9_0 },
{ .compatible = "qcom,pcie-sm8250", .data = &cfg_1_9_0 },
+ { .compatible = "qcom,pcie-sm8350", .data = &cfg_1_9_0 },
{ .compatible = "qcom,pcie-sm8450-pcie0", .data = &cfg_1_9_0 },
{ .compatible = "qcom,pcie-sm8450-pcie1", .data = &cfg_1_9_0 },
+ { .compatible = "qcom,pcie-sm8550", .data = &cfg_1_9_0 },
+ { .compatible = "qcom,pcie-x1e80100", .data = &cfg_sc8280xp },
{ }
};
@@ -1843,12 +2140,18 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0302, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1000, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1001, qcom_fixup_class);
+static const struct dev_pm_ops qcom_pcie_pm_ops = {
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(qcom_pcie_suspend_noirq, qcom_pcie_resume_noirq)
+};
+
static struct platform_driver qcom_pcie_driver = {
.probe = qcom_pcie_probe,
.driver = {
.name = "qcom-pcie",
.suppress_bind_attrs = true,
.of_match_table = qcom_pcie_match,
+ .pm = &qcom_pcie_pm_ops,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
};
builtin_platform_driver(qcom_pcie_driver);
diff --git a/drivers/pci/controller/dwc/pcie-rcar-gen4.c b/drivers/pci/controller/dwc/pcie-rcar-gen4.c
new file mode 100644
index 000000000000..80778917d2dd
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-rcar-gen4.c
@@ -0,0 +1,804 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * PCIe controller driver for Renesas R-Car Gen4 Series SoCs
+ * Copyright (C) 2022-2023 Renesas Electronics Corporation
+ *
+ * The r8a779g0 (R-Car V4H) controller requires specific firmware to be
+ * provided to initialize the PHY; otherwise, the PCIe controller will
+ * not work.
+ */
+
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+
+#include "../../pci.h"
+#include "pcie-designware.h"
+
+/* Renesas-specific */
+/* PCIe Mode Setting Register 0 */
+#define PCIEMSR0 0x0000
+#define APP_SRIS_MODE BIT(6)
+#define DEVICE_TYPE_EP 0
+#define DEVICE_TYPE_RC BIT(4)
+#define BIFUR_MOD_SET_ON BIT(0)
+
+/* PCIe Interrupt Status 0 */
+#define PCIEINTSTS0 0x0084
+
+/* PCIe Interrupt Status 0 Enable */
+#define PCIEINTSTS0EN 0x0310
+#define MSI_CTRL_INT BIT(26)
+#define SMLH_LINK_UP BIT(7)
+#define RDLH_LINK_UP BIT(6)
+
+/* PCIe DMA Interrupt Status Enable */
+#define PCIEDMAINTSTSEN 0x0314
+#define PCIEDMAINTSTSEN_INIT GENMASK(15, 0)
+
+/* Port Logic Registers 89 */
+#define PRTLGC89 0x0b70
+
+/* Port Logic Registers 90 */
+#define PRTLGC90 0x0b74
+
+/* PCIe Reset Control Register 1 */
+#define PCIERSTCTRL1 0x0014
+#define APP_HOLD_PHY_RST BIT(16)
+#define APP_LTSSM_ENABLE BIT(0)
+
+/* PCIe Power Management Control */
+#define PCIEPWRMNGCTRL 0x0070
+#define APP_CLK_REQ_N BIT(11)
+#define APP_CLK_PM_EN BIT(10)
+
+#define RCAR_NUM_SPEED_CHANGE_RETRIES 10
+#define RCAR_MAX_LINK_SPEED 4
+
+#define RCAR_GEN4_PCIE_EP_FUNC_DBI_OFFSET 0x1000
+#define RCAR_GEN4_PCIE_EP_FUNC_DBI2_OFFSET 0x800
+
+#define RCAR_GEN4_PCIE_FIRMWARE_NAME "rcar_gen4_pcie.bin"
+#define RCAR_GEN4_PCIE_FIRMWARE_BASE_ADDR 0xc000
+MODULE_FIRMWARE(RCAR_GEN4_PCIE_FIRMWARE_NAME);
+
+struct rcar_gen4_pcie;
+struct rcar_gen4_pcie_drvdata {
+ void (*additional_common_init)(struct rcar_gen4_pcie *rcar);
+ int (*ltssm_control)(struct rcar_gen4_pcie *rcar, bool enable);
+ enum dw_pcie_device_mode mode;
+};
+
+struct rcar_gen4_pcie {
+ struct dw_pcie dw;
+ void __iomem *base;
+ void __iomem *phy_base;
+ struct platform_device *pdev;
+ const struct rcar_gen4_pcie_drvdata *drvdata;
+};
+#define to_rcar_gen4_pcie(_dw) container_of(_dw, struct rcar_gen4_pcie, dw)
+
+/* Common */
+static bool rcar_gen4_pcie_link_up(struct dw_pcie *dw)
+{
+ struct rcar_gen4_pcie *rcar = to_rcar_gen4_pcie(dw);
+ u32 val, mask;
+
+ val = readl(rcar->base + PCIEINTSTS0);
+ mask = RDLH_LINK_UP | SMLH_LINK_UP;
+
+ return (val & mask) == mask;
+}
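+
+/*
+ * Both bits must be set: per the usual DWC convention, SMLH_LINK_UP
+ * reflects the PHY/LTSSM link-up state and RDLH_LINK_UP the data link
+ * layer link-up state.
+ */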
+
+/*
+ * Manually initiate the speed change. Return 0 if change succeeded; otherwise
+ * -ETIMEDOUT.
+ */
+static int rcar_gen4_pcie_speed_change(struct dw_pcie *dw)
+{
+ u32 val;
+ int i;
+
+ val = dw_pcie_readl_dbi(dw, PCIE_LINK_WIDTH_SPEED_CONTROL);
+ val &= ~PORT_LOGIC_SPEED_CHANGE;
+ dw_pcie_writel_dbi(dw, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
+
+ val = dw_pcie_readl_dbi(dw, PCIE_LINK_WIDTH_SPEED_CONTROL);
+ val |= PORT_LOGIC_SPEED_CHANGE;
+ dw_pcie_writel_dbi(dw, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
+
+ for (i = 0; i < RCAR_NUM_SPEED_CHANGE_RETRIES; i++) {
+ val = dw_pcie_readl_dbi(dw, PCIE_LINK_WIDTH_SPEED_CONTROL);
+ if (!(val & PORT_LOGIC_SPEED_CHANGE))
+ return 0;
+ usleep_range(10000, 11000);
+ }
+
+ return -ETIMEDOUT;
+}
+
+/*
+ * Enable the LTSSM of this controller and manually initiate the speed
+ * change. Returns 0 on success, or a negative error code if enabling the
+ * LTSSM fails.
+ */
+static int rcar_gen4_pcie_start_link(struct dw_pcie *dw)
+{
+ struct rcar_gen4_pcie *rcar = to_rcar_gen4_pcie(dw);
+ int i, changes, ret;
+
+ if (rcar->drvdata->ltssm_control) {
+ ret = rcar->drvdata->ltssm_control(rcar, true);
+ if (ret)
+ return ret;
+ }
+
+ /*
+	 * If max_link_speed is PCIe Gen2 or higher, the speed change must be
+	 * initiated directly here, with retries.
+ */
+ changes = min_not_zero(dw->max_link_speed, RCAR_MAX_LINK_SPEED) - 1;
+
+ /*
+	 * In RC mode, dw_pcie_setup_rc() already initiates one speed change,
+	 * which trains the link to PCIe Gen2, so only the remaining changes
+	 * up to PCIe Gen4 are needed here.
+ */
+ if (changes && rcar->drvdata->mode == DW_PCIE_RC_TYPE)
+ changes--;
+
+ for (i = 0; i < changes; i++) {
+		/* In EP mode the link may not be up yet, so stop retrying */
+ if (rcar_gen4_pcie_speed_change(dw))
+ break;
+ }
+
+ return 0;
+}
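+
+/*
+ * Worked example: with max_link_speed = 4 (Gen4), changes starts at 3.
+ * In RC mode, dw_pcie_setup_rc() already triggers the first change (to
+ * Gen2), so the loop only performs the remaining two (Gen2 to Gen3, then
+ * Gen3 to Gen4).
+ */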
+
+static void rcar_gen4_pcie_stop_link(struct dw_pcie *dw)
+{
+ struct rcar_gen4_pcie *rcar = to_rcar_gen4_pcie(dw);
+
+ if (rcar->drvdata->ltssm_control)
+ rcar->drvdata->ltssm_control(rcar, false);
+}
+
+static int rcar_gen4_pcie_common_init(struct rcar_gen4_pcie *rcar)
+{
+ struct dw_pcie *dw = &rcar->dw;
+ u32 val;
+ int ret;
+
+ ret = clk_bulk_prepare_enable(DW_PCIE_NUM_CORE_CLKS, dw->core_clks);
+ if (ret) {
+ dev_err(dw->dev, "Enabling core clocks failed\n");
+ return ret;
+ }
+
+ if (!reset_control_status(dw->core_rsts[DW_PCIE_PWR_RST].rstc)) {
+ reset_control_assert(dw->core_rsts[DW_PCIE_PWR_RST].rstc);
+ /*
+ * R-Car V4H Reference Manual R19UH0186EJ0130 Rev.1.30 Apr.
+ * 21, 2025 page 585 Figure 9.3.2 Software Reset flow (B)
+ * indicates that for peripherals in HSC domain, after
+ * reset has been asserted by writing a matching reset bit
+ * into register SRCR, it is mandatory to wait 1ms.
+ */
+ fsleep(1000);
+ }
+
+ val = readl(rcar->base + PCIEMSR0);
+ if (rcar->drvdata->mode == DW_PCIE_RC_TYPE) {
+ val |= DEVICE_TYPE_RC;
+ } else if (rcar->drvdata->mode == DW_PCIE_EP_TYPE) {
+ val |= DEVICE_TYPE_EP;
+ } else {
+ ret = -EINVAL;
+ goto err_unprepare;
+ }
+
+ if (dw->num_lanes < 4)
+ val |= BIFUR_MOD_SET_ON;
+
+ writel(val, rcar->base + PCIEMSR0);
+
+ ret = reset_control_deassert(dw->core_rsts[DW_PCIE_PWR_RST].rstc);
+ if (ret)
+ goto err_unprepare;
+
+ /*
+	 * Ensure the reset is latched and the core is ready for DBI access.
+	 * On R-Car V4H, the PCIe reset is asynchronous and does not take
+	 * effect immediately, but needs a short time to complete. If a DBI
+	 * access happens in that window, it generates an SError. To make sure
+	 * that condition can never happen, read back the state of the reset,
+	 * which turns the asynchronous reset into a synchronous one, and wait
+	 * a little over 1 ms as an additional safety margin.
+ */
+ reset_control_status(dw->core_rsts[DW_PCIE_PWR_RST].rstc);
+ fsleep(1000);
+
+ if (rcar->drvdata->additional_common_init)
+ rcar->drvdata->additional_common_init(rcar);
+
+ return 0;
+
+err_unprepare:
+ clk_bulk_disable_unprepare(DW_PCIE_NUM_CORE_CLKS, dw->core_clks);
+
+ return ret;
+}
+
+static void rcar_gen4_pcie_common_deinit(struct rcar_gen4_pcie *rcar)
+{
+ struct dw_pcie *dw = &rcar->dw;
+
+ reset_control_assert(dw->core_rsts[DW_PCIE_PWR_RST].rstc);
+ clk_bulk_disable_unprepare(DW_PCIE_NUM_CORE_CLKS, dw->core_clks);
+}
+
+static int rcar_gen4_pcie_prepare(struct rcar_gen4_pcie *rcar)
+{
+ struct device *dev = rcar->dw.dev;
+ int err;
+
+ pm_runtime_enable(dev);
+ err = pm_runtime_resume_and_get(dev);
+ if (err < 0) {
+ dev_err(dev, "Runtime resume failed\n");
+ pm_runtime_disable(dev);
+ }
+
+ return err;
+}
+
+static void rcar_gen4_pcie_unprepare(struct rcar_gen4_pcie *rcar)
+{
+ struct device *dev = rcar->dw.dev;
+
+ pm_runtime_put(dev);
+ pm_runtime_disable(dev);
+}
+
+static int rcar_gen4_pcie_get_resources(struct rcar_gen4_pcie *rcar)
+{
+ rcar->phy_base = devm_platform_ioremap_resource_byname(rcar->pdev, "phy");
+ if (IS_ERR(rcar->phy_base))
+ return PTR_ERR(rcar->phy_base);
+
+ /* Renesas-specific registers */
+ rcar->base = devm_platform_ioremap_resource_byname(rcar->pdev, "app");
+
+ return PTR_ERR_OR_ZERO(rcar->base);
+}
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .start_link = rcar_gen4_pcie_start_link,
+ .stop_link = rcar_gen4_pcie_stop_link,
+ .link_up = rcar_gen4_pcie_link_up,
+};
+
+static struct rcar_gen4_pcie *rcar_gen4_pcie_alloc(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rcar_gen4_pcie *rcar;
+
+ rcar = devm_kzalloc(dev, sizeof(*rcar), GFP_KERNEL);
+ if (!rcar)
+ return ERR_PTR(-ENOMEM);
+
+ rcar->dw.ops = &dw_pcie_ops;
+ rcar->dw.dev = dev;
+ rcar->pdev = pdev;
+ rcar->dw.edma.mf = EDMA_MF_EDMA_UNROLL;
+ dw_pcie_cap_set(&rcar->dw, REQ_RES);
+ platform_set_drvdata(pdev, rcar);
+
+ return rcar;
+}
+
+/* Host mode */
+static int rcar_gen4_pcie_host_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *dw = to_dw_pcie_from_pp(pp);
+ struct rcar_gen4_pcie *rcar = to_rcar_gen4_pcie(dw);
+ int ret;
+ u32 val;
+
+ gpiod_set_value_cansleep(dw->pe_rst, 1);
+
+ ret = rcar_gen4_pcie_common_init(rcar);
+ if (ret)
+ return ret;
+
+ /*
+ * According to the section 3.5.7.2 "RC Mode" in DWC PCIe Dual Mode
+	 * According to section 3.5.7.2 "RC Mode" in the DWC PCIe Dual Mode
+	 * databook Rev.5.20a and section 3.5.6.1 "RC mode" in the DWC PCIe
+	 * RC databook v5.20a, we
+ * during device enumeration.
+ */
+ dw_pcie_writel_dbi2(dw, PCI_BASE_ADDRESS_0, 0x0);
+ dw_pcie_writel_dbi2(dw, PCI_BASE_ADDRESS_1, 0x0);
+
+ /* Enable MSI interrupt signal */
+ val = readl(rcar->base + PCIEINTSTS0EN);
+ val |= MSI_CTRL_INT;
+ writel(val, rcar->base + PCIEINTSTS0EN);
+
+	msleep(PCIE_T_PVPERL_MS);	/* keep PERST# asserted for the 100 ms T_PVPERL minimum */
+
+ gpiod_set_value_cansleep(dw->pe_rst, 0);
+
+ return 0;
+}
+
+static void rcar_gen4_pcie_host_deinit(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *dw = to_dw_pcie_from_pp(pp);
+ struct rcar_gen4_pcie *rcar = to_rcar_gen4_pcie(dw);
+
+ gpiod_set_value_cansleep(dw->pe_rst, 1);
+ rcar_gen4_pcie_common_deinit(rcar);
+}
+
+static const struct dw_pcie_host_ops rcar_gen4_pcie_host_ops = {
+ .init = rcar_gen4_pcie_host_init,
+ .deinit = rcar_gen4_pcie_host_deinit,
+};
+
+static int rcar_gen4_add_dw_pcie_rp(struct rcar_gen4_pcie *rcar)
+{
+ struct dw_pcie_rp *pp = &rcar->dw.pp;
+
+ if (!IS_ENABLED(CONFIG_PCIE_RCAR_GEN4_HOST))
+ return -ENODEV;
+
+ pp->num_vectors = MAX_MSI_IRQS;
+ pp->ops = &rcar_gen4_pcie_host_ops;
+
+ return dw_pcie_host_init(pp);
+}
+
+static void rcar_gen4_remove_dw_pcie_rp(struct rcar_gen4_pcie *rcar)
+{
+ dw_pcie_host_deinit(&rcar->dw.pp);
+}
+
+/* Endpoint mode */
+static void rcar_gen4_pcie_ep_pre_init(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *dw = to_dw_pcie_from_ep(ep);
+ struct rcar_gen4_pcie *rcar = to_rcar_gen4_pcie(dw);
+ int ret;
+
+ ret = rcar_gen4_pcie_common_init(rcar);
+ if (ret)
+ return;
+
+ writel(PCIEDMAINTSTSEN_INIT, rcar->base + PCIEDMAINTSTSEN);
+}
+
+static void rcar_gen4_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ enum pci_barno bar;
+
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
+ dw_pcie_ep_reset_bar(pci, bar);
+}
+
+static void rcar_gen4_pcie_ep_deinit(struct rcar_gen4_pcie *rcar)
+{
+ writel(0, rcar->base + PCIEDMAINTSTSEN);
+ rcar_gen4_pcie_common_deinit(rcar);
+}
+
+static int rcar_gen4_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+ unsigned int type, u16 interrupt_num)
+{
+ struct dw_pcie *dw = to_dw_pcie_from_ep(ep);
+
+ switch (type) {
+ case PCI_IRQ_INTX:
+ return dw_pcie_ep_raise_intx_irq(ep, func_no);
+ case PCI_IRQ_MSI:
+ return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
+ default:
+ dev_err(dw->dev, "Unknown IRQ type\n");
+ return -EINVAL;
+ }
+}
+
+static const struct pci_epc_features rcar_gen4_pcie_epc_features = {
+ .msi_capable = true,
+ .bar[BAR_1] = { .type = BAR_RESERVED, },
+ .bar[BAR_3] = { .type = BAR_RESERVED, },
+ .bar[BAR_4] = { .type = BAR_FIXED, .fixed_size = 256 },
+ .bar[BAR_5] = { .type = BAR_RESERVED, },
+ .align = SZ_1M,
+};
+
+static const struct pci_epc_features*
+rcar_gen4_pcie_ep_get_features(struct dw_pcie_ep *ep)
+{
+ return &rcar_gen4_pcie_epc_features;
+}
+
+static unsigned int rcar_gen4_pcie_ep_get_dbi_offset(struct dw_pcie_ep *ep,
+ u8 func_no)
+{
+ return func_no * RCAR_GEN4_PCIE_EP_FUNC_DBI_OFFSET;
+}
+
+static unsigned int rcar_gen4_pcie_ep_get_dbi2_offset(struct dw_pcie_ep *ep,
+ u8 func_no)
+{
+ return func_no * RCAR_GEN4_PCIE_EP_FUNC_DBI2_OFFSET;
+}
+
+static const struct dw_pcie_ep_ops pcie_ep_ops = {
+ .pre_init = rcar_gen4_pcie_ep_pre_init,
+ .init = rcar_gen4_pcie_ep_init,
+ .raise_irq = rcar_gen4_pcie_ep_raise_irq,
+ .get_features = rcar_gen4_pcie_ep_get_features,
+ .get_dbi_offset = rcar_gen4_pcie_ep_get_dbi_offset,
+ .get_dbi2_offset = rcar_gen4_pcie_ep_get_dbi2_offset,
+};
+
+static int rcar_gen4_add_dw_pcie_ep(struct rcar_gen4_pcie *rcar)
+{
+ struct dw_pcie_ep *ep = &rcar->dw.ep;
+ struct device *dev = rcar->dw.dev;
+ int ret;
+
+ if (!IS_ENABLED(CONFIG_PCIE_RCAR_GEN4_EP))
+ return -ENODEV;
+
+ ep->ops = &pcie_ep_ops;
+
+ ret = dw_pcie_ep_init(ep);
+ if (ret) {
+ rcar_gen4_pcie_ep_deinit(rcar);
+ return ret;
+ }
+
+ ret = dw_pcie_ep_init_registers(ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DWC endpoint registers\n");
+ dw_pcie_ep_deinit(ep);
+ rcar_gen4_pcie_ep_deinit(rcar);
+		return ret;
+	}
+
+ pci_epc_init_notify(ep->epc);
+
+ return ret;
+}
+
+static void rcar_gen4_remove_dw_pcie_ep(struct rcar_gen4_pcie *rcar)
+{
+ dw_pcie_ep_deinit(&rcar->dw.ep);
+ rcar_gen4_pcie_ep_deinit(rcar);
+}
+
+/* Common */
+static int rcar_gen4_add_dw_pcie(struct rcar_gen4_pcie *rcar)
+{
+ rcar->drvdata = of_device_get_match_data(&rcar->pdev->dev);
+ if (!rcar->drvdata)
+ return -EINVAL;
+
+ switch (rcar->drvdata->mode) {
+ case DW_PCIE_RC_TYPE:
+ return rcar_gen4_add_dw_pcie_rp(rcar);
+ case DW_PCIE_EP_TYPE:
+ return rcar_gen4_add_dw_pcie_ep(rcar);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int rcar_gen4_pcie_probe(struct platform_device *pdev)
+{
+ struct rcar_gen4_pcie *rcar;
+ int err;
+
+ rcar = rcar_gen4_pcie_alloc(pdev);
+ if (IS_ERR(rcar))
+ return PTR_ERR(rcar);
+
+ err = rcar_gen4_pcie_get_resources(rcar);
+ if (err)
+ return err;
+
+ err = rcar_gen4_pcie_prepare(rcar);
+ if (err)
+ return err;
+
+ err = rcar_gen4_add_dw_pcie(rcar);
+ if (err)
+ goto err_unprepare;
+
+ return 0;
+
+err_unprepare:
+ rcar_gen4_pcie_unprepare(rcar);
+
+ return err;
+}
+
+static void rcar_gen4_remove_dw_pcie(struct rcar_gen4_pcie *rcar)
+{
+ switch (rcar->drvdata->mode) {
+ case DW_PCIE_RC_TYPE:
+ rcar_gen4_remove_dw_pcie_rp(rcar);
+ break;
+ case DW_PCIE_EP_TYPE:
+ rcar_gen4_remove_dw_pcie_ep(rcar);
+ break;
+ default:
+ break;
+ }
+}
+
+static void rcar_gen4_pcie_remove(struct platform_device *pdev)
+{
+ struct rcar_gen4_pcie *rcar = platform_get_drvdata(pdev);
+
+ rcar_gen4_remove_dw_pcie(rcar);
+ rcar_gen4_pcie_unprepare(rcar);
+}
+
+static int r8a779f0_pcie_ltssm_control(struct rcar_gen4_pcie *rcar, bool enable)
+{
+ u32 val;
+
+ val = readl(rcar->base + PCIERSTCTRL1);
+ if (enable) {
+ val |= APP_LTSSM_ENABLE;
+ val &= ~APP_HOLD_PHY_RST;
+ } else {
+ /*
+		 * The R-Car datasheet doesn't mention how to assert
+		 * APP_HOLD_PHY_RST, so don't assert it again here. Doing so
+		 * caused a hang in dw_edma_core_off() when the controller
+		 * didn't detect a PCI device.
+ */
+ val &= ~APP_LTSSM_ENABLE;
+ }
+ writel(val, rcar->base + PCIERSTCTRL1);
+
+ return 0;
+}
+
+static void rcar_gen4_pcie_additional_common_init(struct rcar_gen4_pcie *rcar)
+{
+ struct dw_pcie *dw = &rcar->dw;
+ u32 val;
+
+ val = dw_pcie_readl_dbi(dw, PCIE_PORT_LANE_SKEW);
+ val &= ~PORT_LANE_SKEW_INSERT_MASK;
+ if (dw->num_lanes < 4)
+ val |= BIT(6);
+ dw_pcie_writel_dbi(dw, PCIE_PORT_LANE_SKEW, val);
+
+ val = readl(rcar->base + PCIEPWRMNGCTRL);
+ val |= APP_CLK_REQ_N | APP_CLK_PM_EN;
+ writel(val, rcar->base + PCIEPWRMNGCTRL);
+}
+
+static void rcar_gen4_pcie_phy_reg_update_bits(struct rcar_gen4_pcie *rcar,
+ u32 offset, u32 mask, u32 val)
+{
+ u32 tmp;
+
+ tmp = readl(rcar->phy_base + offset);
+ tmp &= ~mask;
+ tmp |= val;
+ writel(tmp, rcar->phy_base + offset);
+}
+
+/*
+ * The SoC datasheet suggests checking the port logic register bits during
+ * the firmware write. If the read returns a non-zero value, return -EAGAIN
+ * to indicate that the write needs to be retried; if it returns zero,
+ * return 0 to indicate success.
+ */
+static int rcar_gen4_pcie_reg_test_bit(struct rcar_gen4_pcie *rcar,
+ u32 offset, u32 mask)
+{
+ struct dw_pcie *dw = &rcar->dw;
+
+ if (dw_pcie_readl_dbi(dw, offset) & mask)
+ return -EAGAIN;
+
+ return 0;
+}
+
+static int rcar_gen4_pcie_download_phy_firmware(struct rcar_gen4_pcie *rcar)
+{
+	/* The check_addr values are magic numbers taken from the datasheet */
+ static const u32 check_addr[] = {
+ 0x00101018,
+ 0x00101118,
+ 0x00101021,
+ 0x00101121,
+ };
+ struct dw_pcie *dw = &rcar->dw;
+ const struct firmware *fw;
+ unsigned int i, timeout;
+ u32 data;
+ int ret;
+
+ ret = request_firmware(&fw, RCAR_GEN4_PCIE_FIRMWARE_NAME, dw->dev);
+ if (ret) {
+ dev_err(dw->dev, "Failed to load firmware (%s): %d\n",
+ RCAR_GEN4_PCIE_FIRMWARE_NAME, ret);
+ return ret;
+ }
+
+ for (i = 0; i < (fw->size / 2); i++) {
+ data = fw->data[(i * 2) + 1] << 8 | fw->data[i * 2];
+ timeout = 100;
+ do {
+ dw_pcie_writel_dbi(dw, PRTLGC89, RCAR_GEN4_PCIE_FIRMWARE_BASE_ADDR + i);
+ dw_pcie_writel_dbi(dw, PRTLGC90, data);
+ if (!rcar_gen4_pcie_reg_test_bit(rcar, PRTLGC89, BIT(30)))
+ break;
+ if (!(--timeout)) {
+ ret = -ETIMEDOUT;
+ goto exit;
+ }
+ usleep_range(100, 200);
+ } while (1);
+ }
+
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x0f8, BIT(17), BIT(17));
+
+ for (i = 0; i < ARRAY_SIZE(check_addr); i++) {
+ timeout = 100;
+ do {
+ dw_pcie_writel_dbi(dw, PRTLGC89, check_addr[i]);
+ ret = rcar_gen4_pcie_reg_test_bit(rcar, PRTLGC89, BIT(30));
+ ret |= rcar_gen4_pcie_reg_test_bit(rcar, PRTLGC90, BIT(0));
+ if (!ret)
+ break;
+ if (!(--timeout)) {
+ ret = -ETIMEDOUT;
+ goto exit;
+ }
+ usleep_range(100, 200);
+ } while (1);
+ }
+
+exit:
+ release_firmware(fw);
+
+ return ret;
+}
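+
+/*
+ * Note on the download loop above: the firmware image is consumed as a
+ * stream of 16-bit little-endian words (fw->data[2 * i] |
+ * fw->data[2 * i + 1] << 8), each written via PRTLGC90 to address
+ * RCAR_GEN4_PCIE_FIRMWARE_BASE_ADDR + i, with PRTLGC89 BIT(30) polled as
+ * a busy flag (an inference from the retry logic; the datasheet only
+ * gives these values as magic numbers).
+ */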
+
+static int rcar_gen4_pcie_ltssm_control(struct rcar_gen4_pcie *rcar, bool enable)
+{
+ struct dw_pcie *dw = &rcar->dw;
+ u32 val;
+ int ret;
+
+ if (!enable) {
+ val = readl(rcar->base + PCIERSTCTRL1);
+ val &= ~APP_LTSSM_ENABLE;
+ writel(val, rcar->base + PCIERSTCTRL1);
+
+ return 0;
+ }
+
+ val = dw_pcie_readl_dbi(dw, PCIE_PORT_FORCE);
+ val |= PORT_FORCE_DO_DESKEW_FOR_SRIS;
+ dw_pcie_writel_dbi(dw, PCIE_PORT_FORCE, val);
+
+ val = readl(rcar->base + PCIEMSR0);
+ val |= APP_SRIS_MODE;
+ writel(val, rcar->base + PCIEMSR0);
+
+ /*
+	 * The R-Car Gen4 datasheet doesn't name the PHY registers, but the
+	 * initialization procedure does list these offsets, so the driver
+	 * uses them as magic numbers.
+ */
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x700, BIT(28), 0);
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x700, BIT(20), 0);
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x700, BIT(12), 0);
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x700, BIT(4), 0);
+
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x148, GENMASK(23, 22), BIT(22));
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x148, GENMASK(18, 16), GENMASK(17, 16));
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x148, GENMASK(7, 6), BIT(6));
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x148, GENMASK(2, 0), GENMASK(1, 0));
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x1d4, GENMASK(16, 15), GENMASK(16, 15));
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x514, BIT(26), BIT(26));
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x0f8, BIT(16), 0);
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x0f8, BIT(19), BIT(19));
+
+ val = readl(rcar->base + PCIERSTCTRL1);
+ val &= ~APP_HOLD_PHY_RST;
+ writel(val, rcar->base + PCIERSTCTRL1);
+
+ ret = readl_poll_timeout(rcar->phy_base + 0x0f8, val, val & BIT(18), 100, 10000);
+ if (ret < 0)
+ return ret;
+
+ ret = rcar_gen4_pcie_download_phy_firmware(rcar);
+ if (ret)
+ return ret;
+
+ val = readl(rcar->base + PCIERSTCTRL1);
+ val |= APP_LTSSM_ENABLE;
+ writel(val, rcar->base + PCIERSTCTRL1);
+
+ return 0;
+}
+
+static struct rcar_gen4_pcie_drvdata drvdata_r8a779f0_pcie = {
+ .ltssm_control = r8a779f0_pcie_ltssm_control,
+ .mode = DW_PCIE_RC_TYPE,
+};
+
+static struct rcar_gen4_pcie_drvdata drvdata_r8a779f0_pcie_ep = {
+ .ltssm_control = r8a779f0_pcie_ltssm_control,
+ .mode = DW_PCIE_EP_TYPE,
+};
+
+static struct rcar_gen4_pcie_drvdata drvdata_rcar_gen4_pcie = {
+ .additional_common_init = rcar_gen4_pcie_additional_common_init,
+ .ltssm_control = rcar_gen4_pcie_ltssm_control,
+ .mode = DW_PCIE_RC_TYPE,
+};
+
+static struct rcar_gen4_pcie_drvdata drvdata_rcar_gen4_pcie_ep = {
+ .additional_common_init = rcar_gen4_pcie_additional_common_init,
+ .ltssm_control = rcar_gen4_pcie_ltssm_control,
+ .mode = DW_PCIE_EP_TYPE,
+};
+
+static const struct of_device_id rcar_gen4_pcie_of_match[] = {
+ {
+ .compatible = "renesas,r8a779f0-pcie",
+ .data = &drvdata_r8a779f0_pcie,
+ },
+ {
+ .compatible = "renesas,r8a779f0-pcie-ep",
+ .data = &drvdata_r8a779f0_pcie_ep,
+ },
+ {
+ .compatible = "renesas,rcar-gen4-pcie",
+ .data = &drvdata_rcar_gen4_pcie,
+ },
+ {
+ .compatible = "renesas,rcar-gen4-pcie-ep",
+ .data = &drvdata_rcar_gen4_pcie_ep,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, rcar_gen4_pcie_of_match);
+
+static struct platform_driver rcar_gen4_pcie_driver = {
+ .driver = {
+ .name = "pcie-rcar-gen4",
+ .of_match_table = rcar_gen4_pcie_of_match,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+ .probe = rcar_gen4_pcie_probe,
+ .remove = rcar_gen4_pcie_remove,
+};
+module_platform_driver(rcar_gen4_pcie_driver);
+
+MODULE_DESCRIPTION("Renesas R-Car Gen4 PCIe controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pci/controller/dwc/pcie-sophgo.c b/drivers/pci/controller/dwc/pcie-sophgo.c
new file mode 100644
index 000000000000..ad4baaa34ffa
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-sophgo.c
@@ -0,0 +1,257 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Sophgo DesignWare based PCIe host controller driver
+ */
+
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/platform_device.h>
+
+#include "pcie-designware.h"
+
+#define to_sophgo_pcie(x) dev_get_drvdata((x)->dev)
+
+#define PCIE_INT_SIGNAL 0xc48
+#define PCIE_INT_EN 0xca0
+
+#define PCIE_INT_SIGNAL_INTX GENMASK(8, 5)
+
+#define PCIE_INT_EN_INTX GENMASK(4, 1)
+#define PCIE_INT_EN_INT_MSI BIT(5)
+
+struct sophgo_pcie {
+ struct dw_pcie pci;
+ void __iomem *app_base;
+ struct clk_bulk_data *clks;
+ unsigned int clk_cnt;
+ struct irq_domain *irq_domain;
+};
+
+static u32 sophgo_pcie_readl_app(struct sophgo_pcie *sophgo, u32 reg)
+{
+ return readl_relaxed(sophgo->app_base + reg);
+}
+
+static void sophgo_pcie_writel_app(struct sophgo_pcie *sophgo, u32 val, u32 reg)
+{
+ writel_relaxed(val, sophgo->app_base + reg);
+}
+
+static void sophgo_pcie_intx_handler(struct irq_desc *desc)
+{
+ struct dw_pcie_rp *pp = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct sophgo_pcie *sophgo = to_sophgo_pcie(pci);
+ unsigned long hwirq, reg;
+
+ chained_irq_enter(chip, desc);
+
+ reg = sophgo_pcie_readl_app(sophgo, PCIE_INT_SIGNAL);
+ reg = FIELD_GET(PCIE_INT_SIGNAL_INTX, reg);
+
+ for_each_set_bit(hwirq, &reg, PCI_NUM_INTX)
+ generic_handle_domain_irq(sophgo->irq_domain, hwirq);
+
+ chained_irq_exit(chip, desc);
+}
+
+static void sophgo_intx_irq_mask(struct irq_data *d)
+{
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct sophgo_pcie *sophgo = to_sophgo_pcie(pci);
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&pp->lock, flags);
+
+ val = sophgo_pcie_readl_app(sophgo, PCIE_INT_EN);
+ val &= ~FIELD_PREP(PCIE_INT_EN_INTX, BIT(d->hwirq));
+ sophgo_pcie_writel_app(sophgo, val, PCIE_INT_EN);
+
+ raw_spin_unlock_irqrestore(&pp->lock, flags);
+}
+
+static void sophgo_intx_irq_unmask(struct irq_data *d)
+{
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct sophgo_pcie *sophgo = to_sophgo_pcie(pci);
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&pp->lock, flags);
+
+ val = sophgo_pcie_readl_app(sophgo, PCIE_INT_EN);
+ val |= FIELD_PREP(PCIE_INT_EN_INTX, BIT(d->hwirq));
+ sophgo_pcie_writel_app(sophgo, val, PCIE_INT_EN);
+
+ raw_spin_unlock_irqrestore(&pp->lock, flags);
+}
+
+static struct irq_chip sophgo_intx_irq_chip = {
+ .name = "INTx",
+ .irq_mask = sophgo_intx_irq_mask,
+ .irq_unmask = sophgo_intx_irq_unmask,
+};
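+
+/*
+ * Note on the mask/unmask helpers above: PCIE_INT_EN_INTX spans bits 4:1
+ * of PCIE_INT_EN, so FIELD_PREP(PCIE_INT_EN_INTX, BIT(d->hwirq)) maps
+ * hwirq 0..3 (INTA..INTD) onto bits 1..4 respectively.
+ */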
+
+static int sophgo_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &sophgo_intx_irq_chip, handle_level_irq);
+ irq_set_chip_data(irq, domain->host_data);
+
+ return 0;
+}
+
+static const struct irq_domain_ops intx_domain_ops = {
+ .map = sophgo_pcie_intx_map,
+};
+
+static int sophgo_pcie_init_irq_domain(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct sophgo_pcie *sophgo = to_sophgo_pcie(pci);
+ struct device *dev = sophgo->pci.dev;
+ struct fwnode_handle *intc;
+ int irq;
+
+ intc = device_get_named_child_node(dev, "interrupt-controller");
+ if (!intc) {
+ dev_err(dev, "missing child interrupt-controller node\n");
+ return -ENODEV;
+ }
+
+ irq = fwnode_irq_get(intc, 0);
+ if (irq < 0) {
+ dev_err(dev, "failed to get INTx irq number\n");
+ fwnode_handle_put(intc);
+ return irq;
+ }
+
+ sophgo->irq_domain = irq_domain_create_linear(intc, PCI_NUM_INTX,
+ &intx_domain_ops, pp);
+ fwnode_handle_put(intc);
+ if (!sophgo->irq_domain) {
+		dev_err(dev, "failed to create an INTx irq domain\n");
+ return -EINVAL;
+ }
+
+ return irq;
+}
+
+static void sophgo_pcie_msi_enable(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct sophgo_pcie *sophgo = to_sophgo_pcie(pci);
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&pp->lock, flags);
+
+ val = sophgo_pcie_readl_app(sophgo, PCIE_INT_EN);
+ val |= PCIE_INT_EN_INT_MSI;
+ sophgo_pcie_writel_app(sophgo, val, PCIE_INT_EN);
+
+ raw_spin_unlock_irqrestore(&pp->lock, flags);
+}
+
+static int sophgo_pcie_host_init(struct dw_pcie_rp *pp)
+{
+ int irq;
+
+ irq = sophgo_pcie_init_irq_domain(pp);
+ if (irq < 0)
+ return irq;
+
+ irq_set_chained_handler_and_data(irq, sophgo_pcie_intx_handler, pp);
+
+ sophgo_pcie_msi_enable(pp);
+
+ return 0;
+}
+
+static const struct dw_pcie_host_ops sophgo_pcie_host_ops = {
+ .init = sophgo_pcie_host_init,
+};
+
+static int sophgo_pcie_clk_init(struct sophgo_pcie *sophgo)
+{
+ struct device *dev = sophgo->pci.dev;
+ int ret;
+
+ ret = devm_clk_bulk_get_all_enabled(dev, &sophgo->clks);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to get clocks\n");
+
+ sophgo->clk_cnt = ret;
+
+ return 0;
+}
+
+static int sophgo_pcie_resource_get(struct platform_device *pdev,
+ struct sophgo_pcie *sophgo)
+{
+ sophgo->app_base = devm_platform_ioremap_resource_byname(pdev, "app");
+ if (IS_ERR(sophgo->app_base))
+ return dev_err_probe(&pdev->dev, PTR_ERR(sophgo->app_base),
+ "failed to map app registers\n");
+
+ return 0;
+}
+
+static int sophgo_pcie_configure_rc(struct sophgo_pcie *sophgo)
+{
+ struct dw_pcie_rp *pp;
+
+ pp = &sophgo->pci.pp;
+ pp->ops = &sophgo_pcie_host_ops;
+
+ return dw_pcie_host_init(pp);
+}
+
+static int sophgo_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct sophgo_pcie *sophgo;
+ int ret;
+
+ sophgo = devm_kzalloc(dev, sizeof(*sophgo), GFP_KERNEL);
+ if (!sophgo)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, sophgo);
+
+ sophgo->pci.dev = dev;
+
+ ret = sophgo_pcie_resource_get(pdev, sophgo);
+ if (ret)
+ return ret;
+
+ ret = sophgo_pcie_clk_init(sophgo);
+ if (ret)
+ return ret;
+
+ return sophgo_pcie_configure_rc(sophgo);
+}
+
+static const struct of_device_id sophgo_pcie_of_match[] = {
+ { .compatible = "sophgo,sg2044-pcie" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sophgo_pcie_of_match);
+
+static struct platform_driver sophgo_pcie_driver = {
+ .driver = {
+ .name = "sophgo-pcie",
+ .of_match_table = sophgo_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = sophgo_pcie_probe,
+};
+builtin_platform_driver(sophgo_pcie_driver);
diff --git a/drivers/pci/controller/dwc/pcie-spacemit-k1.c b/drivers/pci/controller/dwc/pcie-spacemit-k1.c
new file mode 100644
index 000000000000..be20a520255b
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-spacemit-k1.c
@@ -0,0 +1,357 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SpacemiT K1 PCIe host driver
+ *
+ * Copyright (C) 2025 by RISCstar Solutions Corporation. All rights reserved.
+ * Copyright (c) 2023, spacemit Corporation.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gfp.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mod_devicetable.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/types.h>
+
+#include "pcie-designware.h"
+
+#define PCI_VENDOR_ID_SPACEMIT 0x201f
+#define PCI_DEVICE_ID_SPACEMIT_K1 0x0001
+
+/* Offsets and field definitions for link management registers */
+#define K1_PHY_AHB_IRQ_EN 0x0000
+#define PCIE_INTERRUPT_EN BIT(0)
+
+#define K1_PHY_AHB_LINK_STS 0x0004
+#define SMLH_LINK_UP BIT(1)
+#define RDLH_LINK_UP BIT(12)
+
+#define INTR_ENABLE 0x0014
+#define MSI_CTRL_INT BIT(11)
+
+/* Some controls require APMU regmap access */
+#define SYSCON_APMU "spacemit,apmu"
+
+/* Offsets and field definitions for APMU registers */
+#define PCIE_CLK_RESET_CONTROL 0x0000
+#define LTSSM_EN BIT(6)
+#define PCIE_AUX_PWR_DET BIT(9)
+#define PCIE_RC_PERST BIT(12) /* 1: assert PERST# */
+#define APP_HOLD_PHY_RST BIT(30)
+#define DEVICE_TYPE_RC BIT(31) /* 0: endpoint; 1: RC */
+
+#define PCIE_CONTROL_LOGIC 0x0004
+#define PCIE_SOFT_RESET BIT(0)
+
+struct k1_pcie {
+ struct dw_pcie pci;
+ struct phy *phy;
+ void __iomem *link;
+ struct regmap *pmu; /* Errors ignored; MMIO-backed regmap */
+ u32 pmu_off;
+};
+
+#define to_k1_pcie(dw_pcie) \
+ platform_get_drvdata(to_platform_device((dw_pcie)->dev))
+
+static void k1_pcie_toggle_soft_reset(struct k1_pcie *k1)
+{
+ u32 offset;
+ u32 val;
+
+ /*
+ * Write, then read back to guarantee it has reached the device
+ * before we start the delay.
+ */
+ offset = k1->pmu_off + PCIE_CONTROL_LOGIC;
+ regmap_set_bits(k1->pmu, offset, PCIE_SOFT_RESET);
+ regmap_read(k1->pmu, offset, &val);
+
+ mdelay(2);
+
+ regmap_clear_bits(k1->pmu, offset, PCIE_SOFT_RESET);
+}
+
+/* Enable app clocks, deassert resets */
+static int k1_pcie_enable_resources(struct k1_pcie *k1)
+{
+ struct dw_pcie *pci = &k1->pci;
+ int ret;
+
+ ret = clk_bulk_prepare_enable(ARRAY_SIZE(pci->app_clks), pci->app_clks);
+ if (ret)
+ return ret;
+
+ ret = reset_control_bulk_deassert(ARRAY_SIZE(pci->app_rsts),
+ pci->app_rsts);
+ if (ret)
+ goto err_disable_clks;
+
+ return 0;
+
+err_disable_clks:
+ clk_bulk_disable_unprepare(ARRAY_SIZE(pci->app_clks), pci->app_clks);
+
+ return ret;
+}
+
+/* Assert resets, disable app clocks */
+static void k1_pcie_disable_resources(struct k1_pcie *k1)
+{
+ struct dw_pcie *pci = &k1->pci;
+
+ reset_control_bulk_assert(ARRAY_SIZE(pci->app_rsts), pci->app_rsts);
+ clk_bulk_disable_unprepare(ARRAY_SIZE(pci->app_clks), pci->app_clks);
+}
+
+/* FIXME: Disable ASPM L1 to avoid errors reported on some NVMe drives */
+static void k1_pcie_disable_aspm_l1(struct k1_pcie *k1)
+{
+ struct dw_pcie *pci = &k1->pci;
+ u8 offset;
+ u32 val;
+
+ offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ offset += PCI_EXP_LNKCAP;
+
+ dw_pcie_dbi_ro_wr_en(pci);
+ val = dw_pcie_readl_dbi(pci, offset);
+ val &= ~PCI_EXP_LNKCAP_ASPM_L1;
+ dw_pcie_writel_dbi(pci, offset, val);
+ dw_pcie_dbi_ro_wr_dis(pci);
+}
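+
+/*
+ * Note: PCI_EXP_LNKCAP is normally a read-only register; the
+ * dw_pcie_dbi_ro_wr_en()/dw_pcie_dbi_ro_wr_dis() bracket above opens the
+ * DWC write window for read-only DBI registers so that the L1 capability
+ * bit can be cleared before it is ever advertised to the link partner.
+ */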
+
+static int k1_pcie_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct k1_pcie *k1 = to_k1_pcie(pci);
+ u32 reset_ctrl;
+ u32 val;
+ int ret;
+
+ k1_pcie_toggle_soft_reset(k1);
+
+ ret = k1_pcie_enable_resources(k1);
+ if (ret)
+ return ret;
+
+ /* Set the PCI vendor and device ID */
+ dw_pcie_dbi_ro_wr_en(pci);
+ dw_pcie_writew_dbi(pci, PCI_VENDOR_ID, PCI_VENDOR_ID_SPACEMIT);
+ dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, PCI_DEVICE_ID_SPACEMIT_K1);
+ dw_pcie_dbi_ro_wr_dis(pci);
+
+ /*
+ * Start by asserting fundamental reset (drive PERST# low). The
+ * PCI CEM spec says that PERST# should be deasserted at least
+ * 100ms after the power becomes stable, so we'll insert that
+ * delay first. Write, then read it back to guarantee the write
+ * reaches the device before we start the delay.
+ */
+ reset_ctrl = k1->pmu_off + PCIE_CLK_RESET_CONTROL;
+ regmap_set_bits(k1->pmu, reset_ctrl, PCIE_RC_PERST);
+ regmap_read(k1->pmu, reset_ctrl, &val);
+ mdelay(PCIE_T_PVPERL_MS);
+
+ /*
+ * Put the controller in root complex mode, and indicate that
+ * Vaux (3.3v) is present.
+ */
+ regmap_set_bits(k1->pmu, reset_ctrl, DEVICE_TYPE_RC | PCIE_AUX_PWR_DET);
+
+ ret = phy_init(k1->phy);
+ if (ret) {
+ k1_pcie_disable_resources(k1);
+
+ return ret;
+ }
+
+ /* Deassert fundamental reset (drive PERST# high) */
+ regmap_clear_bits(k1->pmu, reset_ctrl, PCIE_RC_PERST);
+
+ /* Finally, as a workaround, disable ASPM L1 */
+ k1_pcie_disable_aspm_l1(k1);
+
+ return 0;
+}
+
+static void k1_pcie_deinit(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct k1_pcie *k1 = to_k1_pcie(pci);
+
+ /* Assert fundamental reset (drive PERST# low) */
+ regmap_set_bits(k1->pmu, k1->pmu_off + PCIE_CLK_RESET_CONTROL,
+ PCIE_RC_PERST);
+
+ phy_exit(k1->phy);
+
+ k1_pcie_disable_resources(k1);
+}
+
+static const struct dw_pcie_host_ops k1_pcie_host_ops = {
+ .init = k1_pcie_init,
+ .deinit = k1_pcie_deinit,
+};
+
+static bool k1_pcie_link_up(struct dw_pcie *pci)
+{
+ struct k1_pcie *k1 = to_k1_pcie(pci);
+ u32 val;
+
+ val = readl_relaxed(k1->link + K1_PHY_AHB_LINK_STS);
+
+ return (val & RDLH_LINK_UP) && (val & SMLH_LINK_UP);
+}
+
+static int k1_pcie_start_link(struct dw_pcie *pci)
+{
+ struct k1_pcie *k1 = to_k1_pcie(pci);
+ u32 val;
+
+ /* Stop holding the PHY in reset, and enable link training */
+ regmap_update_bits(k1->pmu, k1->pmu_off + PCIE_CLK_RESET_CONTROL,
+ APP_HOLD_PHY_RST | LTSSM_EN, LTSSM_EN);
+
+ /* Enable the MSI interrupt */
+ writel_relaxed(MSI_CTRL_INT, k1->link + INTR_ENABLE);
+
+ /* Top-level interrupt enable */
+ val = readl_relaxed(k1->link + K1_PHY_AHB_IRQ_EN);
+ val |= PCIE_INTERRUPT_EN;
+ writel_relaxed(val, k1->link + K1_PHY_AHB_IRQ_EN);
+
+ return 0;
+}
+
+static void k1_pcie_stop_link(struct dw_pcie *pci)
+{
+ struct k1_pcie *k1 = to_k1_pcie(pci);
+ u32 val;
+
+ /* Disable interrupts */
+ val = readl_relaxed(k1->link + K1_PHY_AHB_IRQ_EN);
+ val &= ~PCIE_INTERRUPT_EN;
+ writel_relaxed(val, k1->link + K1_PHY_AHB_IRQ_EN);
+
+ writel_relaxed(0, k1->link + INTR_ENABLE);
+
+ /* Disable the link and hold the PHY in reset */
+ regmap_update_bits(k1->pmu, k1->pmu_off + PCIE_CLK_RESET_CONTROL,
+ APP_HOLD_PHY_RST | LTSSM_EN, APP_HOLD_PHY_RST);
+}
+
+static const struct dw_pcie_ops k1_pcie_ops = {
+ .link_up = k1_pcie_link_up,
+ .start_link = k1_pcie_start_link,
+ .stop_link = k1_pcie_stop_link,
+};
+
+static int k1_pcie_parse_port(struct k1_pcie *k1)
+{
+ struct device *dev = k1->pci.dev;
+ struct device_node *root_port;
+ struct phy *phy;
+
+ /* We assume only one root port */
+ root_port = of_get_next_available_child(dev_of_node(dev), NULL);
+ if (!root_port)
+ return -EINVAL;
+
+ phy = devm_of_phy_get(dev, root_port, NULL);
+
+ of_node_put(root_port);
+
+ if (IS_ERR(phy))
+ return PTR_ERR(phy);
+
+ k1->phy = phy;
+
+ return 0;
+}
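+
+/*
+ * Illustrative DT fragment for the lookups in this file (phandles,
+ * addresses and the APMU offset are made up; the property names follow
+ * from the code):
+ *
+ *	pcie@ca400000 {
+ *		compatible = "spacemit,k1-pcie";
+ *		spacemit,apmu = <&apmu 0x3cc>;
+ *		...
+ *		pcie@0 {
+ *			phys = <&pcie0_phy>;
+ *		};
+ *	};
+ */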
+
+static int k1_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct k1_pcie *k1;
+ int ret;
+
+ k1 = devm_kzalloc(dev, sizeof(*k1), GFP_KERNEL);
+ if (!k1)
+ return -ENOMEM;
+
+ k1->pmu = syscon_regmap_lookup_by_phandle_args(dev_of_node(dev),
+ SYSCON_APMU, 1,
+ &k1->pmu_off);
+ if (IS_ERR(k1->pmu))
+ return dev_err_probe(dev, PTR_ERR(k1->pmu),
+ "failed to lookup PMU registers\n");
+
+ k1->link = devm_platform_ioremap_resource_byname(pdev, "link");
+ if (IS_ERR(k1->link))
+ return dev_err_probe(dev, PTR_ERR(k1->link),
+ "failed to map \"link\" registers\n");
+
+ k1->pci.dev = dev;
+ k1->pci.ops = &k1_pcie_ops;
+ k1->pci.pp.num_vectors = MAX_MSI_IRQS;
+ dw_pcie_cap_set(&k1->pci, REQ_RES);
+
+ k1->pci.pp.ops = &k1_pcie_host_ops;
+
+ /* Hold the PHY in reset until we start the link */
+ regmap_set_bits(k1->pmu, k1->pmu_off + PCIE_CLK_RESET_CONTROL,
+ APP_HOLD_PHY_RST);
+
+ ret = devm_regulator_get_enable(dev, "vpcie3v3");
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to get \"vpcie3v3\" supply\n");
+
+ pm_runtime_set_active(dev);
+ pm_runtime_no_callbacks(dev);
+ devm_pm_runtime_enable(dev);
+
+ platform_set_drvdata(pdev, k1);
+
+ ret = k1_pcie_parse_port(k1);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to parse root port\n");
+
+ ret = dw_pcie_host_init(&k1->pci.pp);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to initialize host\n");
+
+ return 0;
+}
+
+static void k1_pcie_remove(struct platform_device *pdev)
+{
+ struct k1_pcie *k1 = platform_get_drvdata(pdev);
+
+ dw_pcie_host_deinit(&k1->pci.pp);
+}
+
+static const struct of_device_id k1_pcie_of_match_table[] = {
+ { .compatible = "spacemit,k1-pcie", },
+ { }
+};
+
+static struct platform_driver k1_pcie_driver = {
+ .probe = k1_pcie_probe,
+ .remove = k1_pcie_remove,
+ .driver = {
+ .name = "spacemit-k1-pcie",
+ .of_match_table = k1_pcie_of_match_table,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+};
+module_platform_driver(k1_pcie_driver);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SpacemiT K1 PCIe host driver");
diff --git a/drivers/pci/controller/dwc/pcie-spear13xx.c b/drivers/pci/controller/dwc/pcie-spear13xx.c
index 99d47ae80331..01794a9d3ad2 100644
--- a/drivers/pci/controller/dwc/pcie-spear13xx.c
+++ b/drivers/pci/controller/dwc/pcie-spear13xx.c
@@ -110,15 +110,12 @@ static void spear13xx_pcie_enable_interrupts(struct spear13xx_pcie *spear13xx_pc
MSI_CTRL_INT, &app_reg->int_mask);
}
-static int spear13xx_pcie_link_up(struct dw_pcie *pci)
+static bool spear13xx_pcie_link_up(struct dw_pcie *pci)
{
struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pci);
struct pcie_app_reg __iomem *app_reg = spear13xx_pcie->app_base;
- if (readl(&app_reg->app_status_1) & XMLH_LINK_UP)
- return 1;
-
- return 0;
+ return readl(&app_reg->app_status_1) & XMLH_LINK_UP;
}
static int spear13xx_pcie_host_init(struct dw_pcie_rp *pp)
@@ -148,7 +145,7 @@ static int spear13xx_pcie_host_init(struct dw_pcie_rp *pp)
}
static const struct dw_pcie_host_ops spear13xx_pcie_host_ops = {
- .host_init = spear13xx_pcie_host_init,
+ .init = spear13xx_pcie_host_init,
};
static int spear13xx_add_pcie_port(struct spear13xx_pcie *spear13xx_pcie,
@@ -233,7 +230,7 @@ static int spear13xx_pcie_probe(struct platform_device *pdev)
}
if (of_property_read_bool(np, "st,pcie-is-gen1"))
- pci->link_gen = 1;
+ pci->max_link_speed = 1;
platform_set_drvdata(pdev, spear13xx_pcie);
diff --git a/drivers/pci/controller/dwc/pcie-stm32-ep.c b/drivers/pci/controller/dwc/pcie-stm32-ep.c
new file mode 100644
index 000000000000..2cecf32d2b0f
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-stm32-ep.c
@@ -0,0 +1,343 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * STMicroelectronics STM32MP25 PCIe endpoint driver.
+ *
+ * Copyright (C) 2025 STMicroelectronics
+ * Author: Christian Bruel <christian.bruel@foss.st.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/gpio/consumer.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_platform.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include "pcie-designware.h"
+#include "pcie-stm32.h"
+
+struct stm32_pcie {
+ struct dw_pcie pci;
+ struct regmap *regmap;
+ struct reset_control *rst;
+ struct phy *phy;
+ struct clk *clk;
+ struct gpio_desc *perst_gpio;
+ unsigned int perst_irq;
+};
+
+static void stm32_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ enum pci_barno bar;
+
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
+ dw_pcie_ep_reset_bar(pci, bar);
+}
+
+static int stm32_pcie_start_link(struct dw_pcie *pci)
+{
+ struct stm32_pcie *stm32_pcie = to_stm32_pcie(pci);
+
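+ /* The host drives link-up via PERST#; arm the PERST IRQ here */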
+ enable_irq(stm32_pcie->perst_irq);
+
+ return 0;
+}
+
+static void stm32_pcie_stop_link(struct dw_pcie *pci)
+{
+ struct stm32_pcie *stm32_pcie = to_stm32_pcie(pci);
+
+ disable_irq(stm32_pcie->perst_irq);
+}
+
+static int stm32_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+ unsigned int type, u16 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ switch (type) {
+ case PCI_IRQ_INTX:
+ return dw_pcie_ep_raise_intx_irq(ep, func_no);
+ case PCI_IRQ_MSI:
+ return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
+ default:
+ dev_err(pci->dev, "UNKNOWN IRQ type\n");
+ return -EINVAL;
+ }
+}
+
+static const struct pci_epc_features stm32_pcie_epc_features = {
+ .msi_capable = true,
+ .align = SZ_64K,
+};
+
+static const struct pci_epc_features*
+stm32_pcie_get_features(struct dw_pcie_ep *ep)
+{
+ return &stm32_pcie_epc_features;
+}
+
+static const struct dw_pcie_ep_ops stm32_pcie_ep_ops = {
+ .init = stm32_pcie_ep_init,
+ .raise_irq = stm32_pcie_raise_irq,
+ .get_features = stm32_pcie_get_features,
+};
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .start_link = stm32_pcie_start_link,
+ .stop_link = stm32_pcie_stop_link,
+};
+
+static int stm32_pcie_enable_resources(struct stm32_pcie *stm32_pcie)
+{
+ int ret;
+
+ ret = phy_init(stm32_pcie->phy);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(stm32_pcie->clk);
+ if (ret)
+ phy_exit(stm32_pcie->phy);
+
+ return ret;
+}
+
+static void stm32_pcie_disable_resources(struct stm32_pcie *stm32_pcie)
+{
+ clk_disable_unprepare(stm32_pcie->clk);
+
+ phy_exit(stm32_pcie->phy);
+}
+
+static void stm32_pcie_perst_assert(struct dw_pcie *pci)
+{
+ struct stm32_pcie *stm32_pcie = to_stm32_pcie(pci);
+ struct dw_pcie_ep *ep = &stm32_pcie->pci.ep;
+ struct device *dev = pci->dev;
+
+ dev_dbg(dev, "PERST asserted by host\n");
+
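+ /* Stop link training before tearing down the endpoint */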
+ regmap_update_bits(stm32_pcie->regmap, SYSCFG_PCIECR,
+ STM32MP25_PCIECR_LTSSM_EN, 0);
+
+ pci_epc_deinit_notify(ep->epc);
+
+ stm32_pcie_disable_resources(stm32_pcie);
+
+ pm_runtime_put_sync(dev);
+}
+
+static void stm32_pcie_perst_deassert(struct dw_pcie *pci)
+{
+ struct stm32_pcie *stm32_pcie = to_stm32_pcie(pci);
+ struct device *dev = pci->dev;
+ struct dw_pcie_ep *ep = &pci->ep;
+ int ret;
+
+ dev_dbg(dev, "PERST de-asserted by host\n");
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0) {
+ dev_err(dev, "Failed to resume runtime PM: %d\n", ret);
+ return;
+ }
+
+ ret = stm32_pcie_enable_resources(stm32_pcie);
+ if (ret) {
+ dev_err(dev, "Failed to enable resources: %d\n", ret);
+ goto err_pm_put_sync;
+ }
+
+ /*
+ * Reprogram the configuration space registers here because the DBI
+ * registers were reset by the PHY RCC during phy_init().
+ */
+ ret = dw_pcie_ep_init_registers(ep);
+ if (ret) {
+ dev_err(dev, "Failed to complete initialization: %d\n", ret);
+ goto err_disable_resources;
+ }
+
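+ /* Notify EPF drivers that the controller is initialized */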
+ pci_epc_init_notify(ep->epc);
+
+ /* Enable link training */
+ regmap_update_bits(stm32_pcie->regmap, SYSCFG_PCIECR,
+ STM32MP25_PCIECR_LTSSM_EN,
+ STM32MP25_PCIECR_LTSSM_EN);
+
+ return;
+
+err_disable_resources:
+ stm32_pcie_disable_resources(stm32_pcie);
+
+err_pm_put_sync:
+ pm_runtime_put_sync(dev);
+}
+
+static irqreturn_t stm32_pcie_ep_perst_irq_thread(int irq, void *data)
+{
+ struct stm32_pcie *stm32_pcie = data;
+ struct dw_pcie *pci = &stm32_pcie->pci;
+ u32 perst;
+
+ perst = gpiod_get_value(stm32_pcie->perst_gpio);
+ if (perst)
+ stm32_pcie_perst_assert(pci);
+ else
+ stm32_pcie_perst_deassert(pci);
+
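+ /* Re-arm the trigger so the opposite PERST# level is caught next */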
+ irq_set_irq_type(gpiod_to_irq(stm32_pcie->perst_gpio),
+ (perst ? IRQF_TRIGGER_HIGH : IRQF_TRIGGER_LOW));
+
+ return IRQ_HANDLED;
+}
+
+static int stm32_add_pcie_ep(struct stm32_pcie *stm32_pcie,
+ struct platform_device *pdev)
+{
+ struct dw_pcie_ep *ep = &stm32_pcie->pci.ep;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ ret = regmap_update_bits(stm32_pcie->regmap, SYSCFG_PCIECR,
+ STM32MP25_PCIECR_TYPE_MASK,
+ STM32MP25_PCIECR_EP);
+ if (ret)
+ return ret;
+
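+ /* Pulse the controller reset to start from a clean state */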
+ reset_control_assert(stm32_pcie->rst);
+ reset_control_deassert(stm32_pcie->rst);
+
+ ep->ops = &stm32_pcie_ep_ops;
+
+ ep->page_size = stm32_pcie_epc_features.align;
+
+ ret = dw_pcie_ep_init(ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize ep: %d\n", ret);
+ return ret;
+ }
+
+ ret = stm32_pcie_enable_resources(stm32_pcie);
+ if (ret) {
+ dev_err(dev, "Failed to enable resources: %d\n", ret);
+ dw_pcie_ep_deinit(ep);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int stm32_pcie_probe(struct platform_device *pdev)
+{
+ struct stm32_pcie *stm32_pcie;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ stm32_pcie = devm_kzalloc(dev, sizeof(*stm32_pcie), GFP_KERNEL);
+ if (!stm32_pcie)
+ return -ENOMEM;
+
+ stm32_pcie->pci.dev = dev;
+ stm32_pcie->pci.ops = &dw_pcie_ops;
+
+ stm32_pcie->regmap = syscon_regmap_lookup_by_compatible("st,stm32mp25-syscfg");
+ if (IS_ERR(stm32_pcie->regmap))
+ return dev_err_probe(dev, PTR_ERR(stm32_pcie->regmap),
+ "No syscfg specified\n");
+
+ stm32_pcie->phy = devm_phy_get(dev, NULL);
+ if (IS_ERR(stm32_pcie->phy))
+ return dev_err_probe(dev, PTR_ERR(stm32_pcie->phy),
+ "failed to get pcie-phy\n");
+
+ stm32_pcie->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(stm32_pcie->clk))
+ return dev_err_probe(dev, PTR_ERR(stm32_pcie->clk),
+ "Failed to get PCIe clock source\n");
+
+ stm32_pcie->rst = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(stm32_pcie->rst))
+ return dev_err_probe(dev, PTR_ERR(stm32_pcie->rst),
+ "Failed to get PCIe reset\n");
+
+ stm32_pcie->perst_gpio = devm_gpiod_get(dev, "reset", GPIOD_IN);
+ if (IS_ERR(stm32_pcie->perst_gpio))
+ return dev_err_probe(dev, PTR_ERR(stm32_pcie->perst_gpio),
+ "Failed to get reset GPIO\n");
+
+ ret = phy_set_mode(stm32_pcie->phy, PHY_MODE_PCIE);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, stm32_pcie);
+
+ pm_runtime_get_noresume(dev);
+
+ ret = devm_pm_runtime_enable(dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&pdev->dev);
+ return dev_err_probe(dev, ret, "Failed to enable runtime PM\n");
+ }
+
+ stm32_pcie->perst_irq = gpiod_to_irq(stm32_pcie->perst_gpio);
+
+ /* Will be enabled in start_link when device is initialized. */
+ irq_set_status_flags(stm32_pcie->perst_irq, IRQ_NOAUTOEN);
+
+ ret = devm_request_threaded_irq(dev, stm32_pcie->perst_irq, NULL,
+ stm32_pcie_ep_perst_irq_thread,
+ IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ "perst_irq", stm32_pcie);
+ if (ret) {
+ pm_runtime_put_noidle(&pdev->dev);
+ return dev_err_probe(dev, ret, "Failed to request PERST IRQ\n");
+ }
+
+ ret = stm32_add_pcie_ep(stm32_pcie, pdev);
+ if (ret)
+ pm_runtime_put_noidle(&pdev->dev);
+
+ return ret;
+}
+
+static void stm32_pcie_remove(struct platform_device *pdev)
+{
+ struct stm32_pcie *stm32_pcie = platform_get_drvdata(pdev);
+ struct dw_pcie *pci = &stm32_pcie->pci;
+ struct dw_pcie_ep *ep = &pci->ep;
+
+ dw_pcie_stop_link(pci);
+
+ pci_epc_deinit_notify(ep->epc);
+ dw_pcie_ep_deinit(ep);
+
+ stm32_pcie_disable_resources(stm32_pcie);
+
+ pm_runtime_put_sync(&pdev->dev);
+}
+
+static const struct of_device_id stm32_pcie_ep_of_match[] = {
+ { .compatible = "st,stm32mp25-pcie-ep" },
+ {},
+};
+
+static struct platform_driver stm32_pcie_ep_driver = {
+ .probe = stm32_pcie_probe,
+ .remove = stm32_pcie_remove,
+ .driver = {
+ .name = "stm32-ep-pcie",
+ .of_match_table = stm32_pcie_ep_of_match,
+ },
+};
+
+module_platform_driver(stm32_pcie_ep_driver);
+
+MODULE_AUTHOR("Christian Bruel <christian.bruel@foss.st.com>");
+MODULE_DESCRIPTION("STM32MP25 PCIe Endpoint Controller driver");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(of, stm32_pcie_ep_of_match);
diff --git a/drivers/pci/controller/dwc/pcie-stm32.c b/drivers/pci/controller/dwc/pcie-stm32.c
new file mode 100644
index 000000000000..a9e77478443b
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-stm32.c
@@ -0,0 +1,370 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * STMicroelectronics STM32MP25 PCIe root complex driver.
+ *
+ * Copyright (C) 2025 STMicroelectronics
+ * Author: Christian Bruel <christian.bruel@foss.st.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/irq.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/phy/phy.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_wakeirq.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/stddef.h>
+
+#include "../../pci.h"
+
+#include "pcie-designware.h"
+#include "pcie-stm32.h"
+
+struct stm32_pcie {
+ struct dw_pcie pci;
+ struct regmap *regmap;
+ struct reset_control *rst;
+ struct phy *phy;
+ struct clk *clk;
+ struct gpio_desc *perst_gpio;
+ struct gpio_desc *wake_gpio;
+};
+
+static void stm32_pcie_deassert_perst(struct stm32_pcie *stm32_pcie)
+{
+ if (stm32_pcie->perst_gpio) {
+ msleep(PCIE_T_PVPERL_MS);
+ gpiod_set_value(stm32_pcie->perst_gpio, 0);
+ }
+
+ msleep(PCIE_RESET_CONFIG_WAIT_MS);
+}
+
+static void stm32_pcie_assert_perst(struct stm32_pcie *stm32_pcie)
+{
+ gpiod_set_value(stm32_pcie->perst_gpio, 1);
+}
+
+static int stm32_pcie_start_link(struct dw_pcie *pci)
+{
+ struct stm32_pcie *stm32_pcie = to_stm32_pcie(pci);
+
+ return regmap_update_bits(stm32_pcie->regmap, SYSCFG_PCIECR,
+ STM32MP25_PCIECR_LTSSM_EN,
+ STM32MP25_PCIECR_LTSSM_EN);
+}
+
+static void stm32_pcie_stop_link(struct dw_pcie *pci)
+{
+ struct stm32_pcie *stm32_pcie = to_stm32_pcie(pci);
+
+ regmap_update_bits(stm32_pcie->regmap, SYSCFG_PCIECR,
+ STM32MP25_PCIECR_LTSSM_EN, 0);
+}
+
+static int stm32_pcie_suspend_noirq(struct device *dev)
+{
+ struct stm32_pcie *stm32_pcie = dev_get_drvdata(dev);
+ int ret;
+
+ ret = dw_pcie_suspend_noirq(&stm32_pcie->pci);
+ if (ret)
+ return ret;
+
+ stm32_pcie_assert_perst(stm32_pcie);
+
+ clk_disable_unprepare(stm32_pcie->clk);
+
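+ /* Keep the PHY initialized while the device is in the wakeup path */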
+ if (!device_wakeup_path(dev))
+ phy_exit(stm32_pcie->phy);
+
+ return pinctrl_pm_select_sleep_state(dev);
+}
+
+static int stm32_pcie_resume_noirq(struct device *dev)
+{
+ struct stm32_pcie *stm32_pcie = dev_get_drvdata(dev);
+ int ret;
+
+ /*
+ * The core clock is gated with CLKREQ# from the COMBOPHY REFCLK,
+ * so if no device is present it must be deasserted with a GPIO from
+ * the pinctrl pinmux before accessing the DBI registers.
+ */
+ ret = pinctrl_pm_select_init_state(dev);
+ if (ret) {
+ dev_err(dev, "Failed to activate pinctrl pm state: %d\n", ret);
+ return ret;
+ }
+
+ if (!device_wakeup_path(dev)) {
+ ret = phy_init(stm32_pcie->phy);
+ if (ret) {
+ pinctrl_pm_select_default_state(dev);
+ return ret;
+ }
+ }
+
+ ret = clk_prepare_enable(stm32_pcie->clk);
+ if (ret)
+ goto err_phy_exit;
+
+ stm32_pcie_deassert_perst(stm32_pcie);
+
+ ret = dw_pcie_resume_noirq(&stm32_pcie->pci);
+ if (ret)
+ goto err_disable_clk;
+
+ pinctrl_pm_select_default_state(dev);
+
+ return 0;
+
+err_disable_clk:
+ stm32_pcie_assert_perst(stm32_pcie);
+ clk_disable_unprepare(stm32_pcie->clk);
+
+err_phy_exit:
+ phy_exit(stm32_pcie->phy);
+ pinctrl_pm_select_default_state(dev);
+
+ return ret;
+}
+
+static const struct dev_pm_ops stm32_pcie_pm_ops = {
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(stm32_pcie_suspend_noirq,
+ stm32_pcie_resume_noirq)
+};
+
+static const struct dw_pcie_host_ops stm32_pcie_host_ops = {
+};
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .start_link = stm32_pcie_start_link,
+ .stop_link = stm32_pcie_stop_link
+};
+
+static int stm32_add_pcie_port(struct stm32_pcie *stm32_pcie)
+{
+ struct device *dev = stm32_pcie->pci.dev;
+ unsigned int wake_irq;
+ int ret;
+
+ ret = phy_set_mode(stm32_pcie->phy, PHY_MODE_PCIE);
+ if (ret)
+ return ret;
+
+ ret = phy_init(stm32_pcie->phy);
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(stm32_pcie->regmap, SYSCFG_PCIECR,
+ STM32MP25_PCIECR_TYPE_MASK,
+ STM32MP25_PCIECR_RC);
+ if (ret)
+ goto err_phy_exit;
+
+ stm32_pcie_deassert_perst(stm32_pcie);
+
+ if (stm32_pcie->wake_gpio) {
+ wake_irq = gpiod_to_irq(stm32_pcie->wake_gpio);
+ ret = dev_pm_set_dedicated_wake_irq(dev, wake_irq);
+ if (ret) {
+ dev_err(dev, "Failed to enable wakeup irq %d\n", ret);
+ goto err_assert_perst;
+ }
+ irq_set_irq_type(wake_irq, IRQ_TYPE_EDGE_FALLING);
+ }
+
+ return 0;
+
+err_assert_perst:
+ stm32_pcie_assert_perst(stm32_pcie);
+
+err_phy_exit:
+ phy_exit(stm32_pcie->phy);
+
+ return ret;
+}
+
+static void stm32_remove_pcie_port(struct stm32_pcie *stm32_pcie)
+{
+ dev_pm_clear_wake_irq(stm32_pcie->pci.dev);
+
+ stm32_pcie_assert_perst(stm32_pcie);
+
+ phy_exit(stm32_pcie->phy);
+}
+
+static int stm32_pcie_parse_port(struct stm32_pcie *stm32_pcie)
+{
+ struct device *dev = stm32_pcie->pci.dev;
+ struct device_node *root_port;
+
+ root_port = of_get_next_available_child(dev->of_node, NULL);
+
+ stm32_pcie->phy = devm_of_phy_get(dev, root_port, NULL);
+ if (IS_ERR(stm32_pcie->phy)) {
+ of_node_put(root_port);
+ return dev_err_probe(dev, PTR_ERR(stm32_pcie->phy),
+ "Failed to get pcie-phy\n");
+ }
+
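+ /* The "reset" (PERST#) and "wake" GPIOs are optional; -ENOENT means absent */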
+ stm32_pcie->perst_gpio = devm_fwnode_gpiod_get(dev, of_fwnode_handle(root_port),
+ "reset", GPIOD_OUT_HIGH, NULL);
+ if (IS_ERR(stm32_pcie->perst_gpio)) {
+ if (PTR_ERR(stm32_pcie->perst_gpio) != -ENOENT) {
+ of_node_put(root_port);
+ return dev_err_probe(dev, PTR_ERR(stm32_pcie->perst_gpio),
+ "Failed to get reset GPIO\n");
+ }
+ stm32_pcie->perst_gpio = NULL;
+ }
+
+ stm32_pcie->wake_gpio = devm_fwnode_gpiod_get(dev, of_fwnode_handle(root_port),
+ "wake", GPIOD_IN, NULL);
+
+ if (IS_ERR(stm32_pcie->wake_gpio)) {
+ if (PTR_ERR(stm32_pcie->wake_gpio) != -ENOENT) {
+ of_node_put(root_port);
+ return dev_err_probe(dev, PTR_ERR(stm32_pcie->wake_gpio),
+ "Failed to get wake GPIO\n");
+ }
+ stm32_pcie->wake_gpio = NULL;
+ }
+
+ of_node_put(root_port);
+
+ return 0;
+}
+
+static int stm32_pcie_probe(struct platform_device *pdev)
+{
+ struct stm32_pcie *stm32_pcie;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ stm32_pcie = devm_kzalloc(dev, sizeof(*stm32_pcie), GFP_KERNEL);
+ if (!stm32_pcie)
+ return -ENOMEM;
+
+ stm32_pcie->pci.dev = dev;
+ stm32_pcie->pci.ops = &dw_pcie_ops;
+ stm32_pcie->pci.pp.ops = &stm32_pcie_host_ops;
+
+ stm32_pcie->regmap = syscon_regmap_lookup_by_compatible("st,stm32mp25-syscfg");
+ if (IS_ERR(stm32_pcie->regmap))
+ return dev_err_probe(dev, PTR_ERR(stm32_pcie->regmap),
+ "No syscfg specified\n");
+
+ stm32_pcie->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(stm32_pcie->clk))
+ return dev_err_probe(dev, PTR_ERR(stm32_pcie->clk),
+ "Failed to get PCIe clock source\n");
+
+ stm32_pcie->rst = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(stm32_pcie->rst))
+ return dev_err_probe(dev, PTR_ERR(stm32_pcie->rst),
+ "Failed to get PCIe reset\n");
+
+ ret = stm32_pcie_parse_port(stm32_pcie);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, stm32_pcie);
+
+ ret = stm32_add_pcie_port(stm32_pcie);
+ if (ret)
+ return ret;
+
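+ /* Reset the controller before enabling its core clock */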
+ reset_control_assert(stm32_pcie->rst);
+ reset_control_deassert(stm32_pcie->rst);
+
+ ret = clk_prepare_enable(stm32_pcie->clk);
+ if (ret) {
+ dev_err(dev, "Core clock enable failed %d\n", ret);
+ goto err_remove_port;
+ }
+
+ ret = pm_runtime_set_active(dev);
+ if (ret < 0) {
+ dev_err_probe(dev, ret, "Failed to activate runtime PM\n");
+ goto err_disable_clk;
+ }
+
+ pm_runtime_no_callbacks(dev);
+
+ ret = devm_pm_runtime_enable(dev);
+ if (ret < 0) {
+ dev_err_probe(dev, ret, "Failed to enable runtime PM\n");
+ goto err_disable_clk;
+ }
+
+ ret = dw_pcie_host_init(&stm32_pcie->pci.pp);
+ if (ret)
+ goto err_disable_clk;
+
+ if (stm32_pcie->wake_gpio)
+ device_init_wakeup(dev, true);
+
+ return 0;
+
+err_disable_clk:
+ clk_disable_unprepare(stm32_pcie->clk);
+
+err_remove_port:
+ stm32_remove_pcie_port(stm32_pcie);
+
+ return ret;
+}
+
+static void stm32_pcie_remove(struct platform_device *pdev)
+{
+ struct stm32_pcie *stm32_pcie = platform_get_drvdata(pdev);
+ struct dw_pcie_rp *pp = &stm32_pcie->pci.pp;
+
+ if (stm32_pcie->wake_gpio)
+ device_init_wakeup(&pdev->dev, false);
+
+ dw_pcie_host_deinit(pp);
+
+ clk_disable_unprepare(stm32_pcie->clk);
+
+ stm32_remove_pcie_port(stm32_pcie);
+
+ pm_runtime_put_noidle(&pdev->dev);
+}
+
+static const struct of_device_id stm32_pcie_of_match[] = {
+ { .compatible = "st,stm32mp25-pcie-rc" },
+ {},
+};
+
+static struct platform_driver stm32_pcie_driver = {
+ .probe = stm32_pcie_probe,
+ .remove = stm32_pcie_remove,
+ .driver = {
+ .name = "stm32-pcie",
+ .of_match_table = stm32_pcie_of_match,
+ .pm = &stm32_pcie_pm_ops,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+};
+
+module_platform_driver(stm32_pcie_driver);
+
+MODULE_AUTHOR("Christian Bruel <christian.bruel@foss.st.com>");
+MODULE_DESCRIPTION("STM32MP25 PCIe Controller driver");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(of, stm32_pcie_of_match);
diff --git a/drivers/pci/controller/dwc/pcie-stm32.h b/drivers/pci/controller/dwc/pcie-stm32.h
new file mode 100644
index 000000000000..419cf1ff669d
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-stm32.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * ST PCIe driver definitions for STM32-MP25 SoC
+ *
+ * Copyright (C) 2025 STMicroelectronics - All Rights Reserved
+ * Author: Christian Bruel <christian.bruel@foss.st.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/device.h>
+
+#define to_stm32_pcie(x) dev_get_drvdata((x)->dev)
+
+#define STM32MP25_PCIECR_TYPE_MASK GENMASK(11, 8)
+#define STM32MP25_PCIECR_EP 0
+#define STM32MP25_PCIECR_LTSSM_EN BIT(2)
+#define STM32MP25_PCIECR_RC BIT(10)
+
+#define SYSCFG_PCIECR 0x6000
diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
index 02d78a12b6e7..0ddeef70726d 100644
--- a/drivers/pci/controller/dwc/pcie-tegra194.c
+++ b/drivers/pci/controller/dwc/pcie-tegra194.c
@@ -9,18 +9,17 @@
* Author: Vidya Sagar <vidyas@nvidia.com>
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
-#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
+#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/of_gpio.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
@@ -125,7 +124,7 @@
#define APPL_LTR_MSG_1 0xC4
#define LTR_MSG_REQ BIT(15)
-#define LTR_MST_NO_SNOOP_SHIFT 16
+#define LTR_NOSNOOP_MSG_REQ BIT(31)
#define APPL_LTR_MSG_2 0xC8
#define APPL_LTR_MSG_2_LTR_MSG_REQ_STATE BIT(3)
@@ -178,17 +177,12 @@
#define N_FTS_VAL 52
#define FTS_VAL 52
-#define GEN3_EQ_CONTROL_OFF 0x8a8
-#define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT 8
-#define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK GENMASK(23, 8)
-#define GEN3_EQ_CONTROL_OFF_FB_MODE_MASK GENMASK(3, 0)
-
#define PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT 0x8D0
-#define AMBA_ERROR_RESPONSE_CRS_SHIFT 3
-#define AMBA_ERROR_RESPONSE_CRS_MASK GENMASK(1, 0)
-#define AMBA_ERROR_RESPONSE_CRS_OKAY 0
-#define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFFFFFF 1
-#define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFF0001 2
+#define AMBA_ERROR_RESPONSE_RRS_SHIFT 3
+#define AMBA_ERROR_RESPONSE_RRS_MASK GENMASK(1, 0)
+#define AMBA_ERROR_RESPONSE_RRS_OKAY 0
+#define AMBA_ERROR_RESPONSE_RRS_OKAY_FFFFFFFF 1
+#define AMBA_ERROR_RESPONSE_RRS_OKAY_FFFF0001 2
#define MSIX_ADDR_MATCH_LOW_OFF 0x940
#define MSIX_ADDR_MATCH_LOW_OFF_EN BIT(0)
@@ -223,6 +217,7 @@
#define EP_STATE_ENABLED 1
static const unsigned int pcie_gen_freq[] = {
+ GEN1_CORE_CLK_FREQ, /* PCI_EXP_LNKSTA_CLS == 0; undefined */
GEN1_CORE_CLK_FREQ,
GEN2_CORE_CLK_FREQ,
GEN3_CORE_CLK_FREQ,
@@ -265,7 +260,6 @@ struct tegra_pcie_dw {
u32 msi_ctrl_int;
u32 num_lanes;
u32 cid;
- u32 cfg_link_cap_l1sub;
u32 ras_des_cap;
u32 pcie_cap_base;
u32 aspm_cmrt;
@@ -286,6 +280,8 @@ struct tegra_pcie_dw {
struct gpio_desc *pex_refclk_sel_gpiod;
unsigned int pex_rst_irq;
int ep_state;
+ long link_status;
+ struct icc_path *icc_path;
};
static inline struct tegra_pcie_dw *to_tegra_pcie(struct dw_pcie *pci)
@@ -304,9 +300,26 @@ static inline u32 appl_readl(struct tegra_pcie_dw *pcie, const u32 reg)
return readl_relaxed(pcie->appl_base + reg);
}
-struct tegra_pcie_soc {
- enum dw_pcie_device_mode mode;
-};
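+/* Scale interconnect bandwidth and core clock to the negotiated link state */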
+static void tegra_pcie_icc_set(struct tegra_pcie_dw *pcie)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ u32 val, speed, width;
+
+ val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);
+
+ speed = FIELD_GET(PCI_EXP_LNKSTA_CLS, val);
+ width = FIELD_GET(PCI_EXP_LNKSTA_NLW, val);
+
+ val = width * PCIE_SPEED2MBS_ENC(pcie_link_speed[speed]);
+
+ if (icc_set_bw(pcie->icc_path, Mbps_to_icc(val), 0))
+ dev_err(pcie->dev, "can't set bw[%u]\n", val);
+
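+ /* Out-of-range CLS values fall back to the Gen1 clock entry */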
+ if (speed >= ARRAY_SIZE(pcie_gen_freq))
+ speed = 0;
+
+ clk_set_rate(pcie->core_clk, pcie_gen_freq[speed]);
+}
static void apply_bad_link_workaround(struct dw_pcie_rp *pp)
{
@@ -322,8 +335,7 @@ static void apply_bad_link_workaround(struct dw_pcie_rp *pp)
*/
val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);
if (val & PCI_EXP_LNKSTA_LBMS) {
- current_link_width = (val & PCI_EXP_LNKSTA_NLW) >>
- PCI_EXP_LNKSTA_NLW_SHIFT;
+ current_link_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, val);
if (pcie->init_link_width > current_link_width) {
dev_warn(pci->dev, "PCIe link is bad, width reduced\n");
val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
@@ -449,19 +461,20 @@ static void pex_ep_event_hot_rst_done(struct tegra_pcie_dw *pcie)
static irqreturn_t tegra_pcie_ep_irq_thread(int irq, void *arg)
{
struct tegra_pcie_dw *pcie = arg;
+ struct dw_pcie_ep *ep = &pcie->pci.ep;
struct dw_pcie *pci = &pcie->pci;
- u32 val, speed;
+ u32 val;
- speed = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA) &
- PCI_EXP_LNKSTA_CLS;
- clk_set_rate(pcie->core_clk, pcie_gen_freq[speed - 1]);
+ if (test_and_clear_bit(0, &pcie->link_status))
+ dw_pcie_ep_linkup(ep);
+
+ tegra_pcie_icc_set(pcie);
if (pcie->of_data->has_ltr_req_fix)
return IRQ_HANDLED;
/* If EP doesn't advertise L1SS, just return */
- val = dw_pcie_readl_dbi(pci, pcie->cfg_link_cap_l1sub);
- if (!(val & (PCI_L1SS_CAP_ASPM_L1_1 | PCI_L1SS_CAP_ASPM_L1_2)))
+ if (!pci->l1ss_support)
return IRQ_HANDLED;
/* Check if BME is set to '1' */
@@ -470,8 +483,12 @@ static irqreturn_t tegra_pcie_ep_irq_thread(int irq, void *arg)
ktime_t timeout;
/* 110us for both snoop and no-snoop */
- val = 110 | (2 << PCI_LTR_SCALE_SHIFT) | LTR_MSG_REQ;
- val |= (val << LTR_MST_NO_SNOOP_SHIFT);
+ val = FIELD_PREP(PCI_LTR_VALUE_MASK, 110) |
+ FIELD_PREP(PCI_LTR_SCALE_MASK, 2) |
+ LTR_MSG_REQ |
+ FIELD_PREP(PCI_LTR_NOSNOOP_VALUE, 110) |
+ FIELD_PREP(PCI_LTR_NOSNOOP_SCALE, 2) |
+ LTR_NOSNOOP_MSG_REQ;
appl_writel(pcie, val, APPL_LTR_MSG_1);
/* Send LTR upstream */
@@ -498,7 +515,6 @@ static irqreturn_t tegra_pcie_ep_irq_thread(int irq, void *arg)
static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg)
{
struct tegra_pcie_dw *pcie = arg;
- struct dw_pcie_ep *ep = &pcie->pci.ep;
int spurious = 1;
u32 status_l0, status_l1, link_status;
@@ -514,7 +530,8 @@ static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg)
link_status = appl_readl(pcie, APPL_LINK_STATUS);
if (link_status & APPL_LINK_STATUS_RDLH_LINK_UP) {
dev_dbg(pcie->dev, "Link is up with Host\n");
- dw_pcie_ep_linkup(ep);
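+ /* dw_pcie_ep_linkup() can sleep; handle link-up in the IRQ thread */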
+ set_bit(0, &pcie->link_status);
+ return IRQ_WAKE_THREAD;
}
}
@@ -589,24 +606,6 @@ static struct pci_ops tegra_pci_ops = {
};
#if defined(CONFIG_PCIEASPM)
-static void disable_aspm_l11(struct tegra_pcie_dw *pcie)
-{
- u32 val;
-
- val = dw_pcie_readl_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub);
- val &= ~PCI_L1SS_CAP_ASPM_L1_1;
- dw_pcie_writel_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub, val);
-}
-
-static void disable_aspm_l12(struct tegra_pcie_dw *pcie)
-{
- u32 val;
-
- val = dw_pcie_readl_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub);
- val &= ~PCI_L1SS_CAP_ASPM_L1_2;
- dw_pcie_writel_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub, val);
-}
-
static inline u32 event_counter_prog(struct tegra_pcie_dw *pcie, u32 event)
{
u32 val;
@@ -663,10 +662,9 @@ static int aspm_state_cnt(struct seq_file *s, void *data)
static void init_host_aspm(struct tegra_pcie_dw *pcie)
{
struct dw_pcie *pci = &pcie->pci;
- u32 val;
+ u32 l1ss, val;
- val = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_L1SS);
- pcie->cfg_link_cap_l1sub = val + PCI_L1SS_CAP;
+ l1ss = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_L1SS);
pcie->ras_des_cap = dw_pcie_find_ext_capability(&pcie->pci,
PCI_EXT_CAP_ID_VNDR);
@@ -678,11 +676,14 @@ static void init_host_aspm(struct tegra_pcie_dw *pcie)
PCIE_RAS_DES_EVENT_COUNTER_CONTROL, val);
/* Program T_cmrt and T_pwr_on values */
- val = dw_pcie_readl_dbi(pci, pcie->cfg_link_cap_l1sub);
+ val = dw_pcie_readl_dbi(pci, l1ss + PCI_L1SS_CAP);
val &= ~(PCI_L1SS_CAP_CM_RESTORE_TIME | PCI_L1SS_CAP_P_PWR_ON_VALUE);
val |= (pcie->aspm_cmrt << 8);
val |= (pcie->aspm_pwr_on_t << 19);
- dw_pcie_writel_dbi(pci, pcie->cfg_link_cap_l1sub, val);
+ dw_pcie_writel_dbi(pci, l1ss + PCI_L1SS_CAP, val);
+
+ if (pcie->supports_clkreq)
+ pci->l1ss_support = true;
/* Program L0s and L1 entrance latencies */
val = dw_pcie_readl_dbi(pci, PCIE_PORT_AFR);
@@ -694,12 +695,19 @@ static void init_host_aspm(struct tegra_pcie_dw *pcie)
static void init_debugfs(struct tegra_pcie_dw *pcie)
{
- debugfs_create_devm_seqfile(pcie->dev, "aspm_state_cnt", pcie->debugfs,
+ struct device *dev = pcie->dev;
+ char *name;
+
+ name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node);
+ if (!name)
+ return;
+
+ pcie->debugfs = debugfs_create_dir(name, NULL);
+
+ debugfs_create_devm_seqfile(dev, "aspm_state_cnt", pcie->debugfs,
aspm_state_cnt);
}
#else
-static inline void disable_aspm_l12(struct tegra_pcie_dw *pcie) { return; }
-static inline void disable_aspm_l11(struct tegra_pcie_dw *pcie) { return; }
static inline void init_host_aspm(struct tegra_pcie_dw *pcie) { return; }
static inline void init_debugfs(struct tegra_pcie_dw *pcie) { return; }
#endif
@@ -734,8 +742,7 @@ static void tegra_pcie_enable_system_interrupts(struct dw_pcie_rp *pp)
val_w = dw_pcie_readw_dbi(&pcie->pci, pcie->pcie_cap_base +
PCI_EXP_LNKSTA);
- pcie->init_link_width = (val_w & PCI_EXP_LNKSTA_NLW) >>
- PCI_EXP_LNKSTA_NLW_SHIFT;
+ pcie->init_link_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, val_w);
val_w = dw_pcie_readw_dbi(&pcie->pci, pcie->pcie_cap_base +
PCI_EXP_LNKCTL);
@@ -744,13 +751,13 @@ static void tegra_pcie_enable_system_interrupts(struct dw_pcie_rp *pp)
val_w);
}
-static void tegra_pcie_enable_legacy_interrupts(struct dw_pcie_rp *pp)
+static void tegra_pcie_enable_intx_interrupts(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
u32 val;
- /* Enable legacy interrupt generation */
+ /* Enable INTX interrupt generation */
val = appl_readl(pcie, APPL_INTR_EN_L0_0);
val |= APPL_INTR_EN_L0_0_SYS_INTR_EN;
val |= APPL_INTR_EN_L0_0_INT_INT_EN;
@@ -801,7 +808,7 @@ static void tegra_pcie_enable_interrupts(struct dw_pcie_rp *pp)
appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17);
tegra_pcie_enable_system_interrupts(pp);
- tegra_pcie_enable_legacy_interrupts(pp);
+ tegra_pcie_enable_intx_interrupts(pp);
if (IS_ENABLED(CONFIG_PCI_MSI))
tegra_pcie_enable_msi_interrupts(pp);
}
@@ -838,9 +845,9 @@ static void config_gen3_gen4_eq_presets(struct tegra_pcie_dw *pcie)
dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
val = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
- val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK;
- val |= (0x3ff << GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT);
- val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE_MASK;
+ val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC;
+ val |= FIELD_PREP(GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC, 0x3ff);
+ val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE;
dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, val);
val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
@@ -849,10 +856,10 @@ static void config_gen3_gen4_eq_presets(struct tegra_pcie_dw *pcie)
dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
val = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
- val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK;
- val |= (pcie->of_data->gen4_preset_vec <<
- GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT);
- val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE_MASK;
+ val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC;
+ val |= FIELD_PREP(GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC,
+ pcie->of_data->gen4_preset_vec);
+ val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE;
dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, val);
val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
@@ -873,11 +880,6 @@ static int tegra_pcie_dw_host_init(struct dw_pcie_rp *pp)
pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci,
PCI_CAP_ID_EXP);
- val_16 = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_DEVCTL);
- val_16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
- val_16 |= PCI_EXP_DEVCTL_PAYLOAD_256B;
- dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + PCI_EXP_DEVCTL, val_16);
-
val = dw_pcie_readl_dbi(pci, PCI_IO_BASE);
val &= ~(IO_BASE_IO_DECODE | IO_BASE_IO_DECODE_BIT8);
dw_pcie_writel_dbi(pci, PCI_IO_BASE, val);
@@ -889,19 +891,13 @@ static int tegra_pcie_dw_host_init(struct dw_pcie_rp *pp)
dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);
- /* Enable as 0xFFFF0001 response for CRS */
+ /* Enable as 0xFFFF0001 response for RRS */
val = dw_pcie_readl_dbi(pci, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT);
- val &= ~(AMBA_ERROR_RESPONSE_CRS_MASK << AMBA_ERROR_RESPONSE_CRS_SHIFT);
- val |= (AMBA_ERROR_RESPONSE_CRS_OKAY_FFFF0001 <<
- AMBA_ERROR_RESPONSE_CRS_SHIFT);
+ val &= ~(AMBA_ERROR_RESPONSE_RRS_MASK << AMBA_ERROR_RESPONSE_RRS_SHIFT);
+ val |= (AMBA_ERROR_RESPONSE_RRS_OKAY_FFFF0001 <<
+ AMBA_ERROR_RESPONSE_RRS_SHIFT);
dw_pcie_writel_dbi(pci, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT, val);
- /* Configure Max lane width from DT */
- val = dw_pcie_readl_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP);
- val &= ~PCI_EXP_LNKCAP_MLW;
- val |= (pcie->num_lanes << PCI_EXP_LNKSTA_NLW_SHIFT);
- dw_pcie_writel_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP, val);
-
/* Clear Slot Clock Configuration bit if SRNS configuration */
if (pcie->enable_srns) {
val_16 = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
@@ -915,12 +911,6 @@ static int tegra_pcie_dw_host_init(struct dw_pcie_rp *pp)
init_host_aspm(pcie);
- /* Disable ASPM-L1SS advertisement if there is no CLKREQ routing */
- if (!pcie->supports_clkreq) {
- disable_aspm_l11(pcie);
- disable_aspm_l12(pcie);
- }
-
if (!pcie->of_data->has_l1ss_exit_fix) {
val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
@@ -940,9 +930,9 @@ static int tegra_pcie_dw_host_init(struct dw_pcie_rp *pp)
static int tegra_pcie_dw_start_link(struct dw_pcie *pci)
{
- u32 val, offset, speed, tmp;
struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
struct dw_pcie_rp *pp = &pci->pp;
+ u32 val, offset, tmp;
bool retry = true;
if (pcie->of_data->mode == DW_PCIE_EP_TYPE) {
@@ -1013,21 +1003,19 @@ retry_link:
goto retry_link;
}
- speed = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA) &
- PCI_EXP_LNKSTA_CLS;
- clk_set_rate(pcie->core_clk, pcie_gen_freq[speed - 1]);
+ tegra_pcie_icc_set(pcie);
tegra_pcie_enable_interrupts(pp);
return 0;
}
-static int tegra_pcie_dw_link_up(struct dw_pcie *pci)
+static bool tegra_pcie_dw_link_up(struct dw_pcie *pci)
{
struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
u32 val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);
- return !!(val & PCI_EXP_LNKSTA_DLLLA);
+ return val & PCI_EXP_LNKSTA_DLLLA;
}
static void tegra_pcie_dw_stop_link(struct dw_pcie *pci)
@@ -1044,7 +1032,7 @@ static const struct dw_pcie_ops tegra_dw_pcie_ops = {
};
static const struct dw_pcie_host_ops tegra_pcie_dw_host_ops = {
- .host_init = tegra_pcie_dw_host_init,
+ .init = tegra_pcie_dw_host_init,
};
static void tegra_pcie_disable_phy(struct tegra_pcie_dw *pcie)
@@ -1200,6 +1188,7 @@ static int tegra_pcie_bpmp_set_ctrl_state(struct tegra_pcie_dw *pcie,
struct mrq_uphy_response resp;
struct tegra_bpmp_message msg;
struct mrq_uphy_request req;
+ int err;
/*
* Controller-5 doesn't need to have its state set by BPMP-FW in
@@ -1222,7 +1211,13 @@ static int tegra_pcie_bpmp_set_ctrl_state(struct tegra_pcie_dw *pcie,
msg.rx.data = &resp;
msg.rx.size = sizeof(resp);
- return tegra_bpmp_transfer(pcie->bpmp, &msg);
+ err = tegra_bpmp_transfer(pcie->bpmp, &msg);
+ if (err)
+ return err;
+ if (msg.rx.ret)
+ return -EINVAL;
+
+ return 0;
}
static int tegra_pcie_bpmp_set_pll_state(struct tegra_pcie_dw *pcie,
@@ -1231,6 +1226,7 @@ static int tegra_pcie_bpmp_set_pll_state(struct tegra_pcie_dw *pcie,
struct mrq_uphy_response resp;
struct tegra_bpmp_message msg;
struct mrq_uphy_request req;
+ int err;
memset(&req, 0, sizeof(req));
memset(&resp, 0, sizeof(resp));
@@ -1250,13 +1246,19 @@ static int tegra_pcie_bpmp_set_pll_state(struct tegra_pcie_dw *pcie,
msg.rx.data = &resp;
msg.rx.size = sizeof(resp);
- return tegra_bpmp_transfer(pcie->bpmp, &msg);
+ err = tegra_bpmp_transfer(pcie->bpmp, &msg);
+ if (err)
+ return err;
+ if (msg.rx.ret)
+ return -EINVAL;
+
+ return 0;
}
static void tegra_pcie_downstream_dev_to_D0(struct tegra_pcie_dw *pcie)
{
struct dw_pcie_rp *pp = &pcie->pci.pp;
- struct pci_bus *child, *root_bus = NULL;
+ struct pci_bus *child, *root_port_bus = NULL;
struct pci_dev *pdev;
/*
@@ -1269,19 +1271,19 @@ static void tegra_pcie_downstream_dev_to_D0(struct tegra_pcie_dw *pcie)
*/
list_for_each_entry(child, &pp->bridge->bus->children, node) {
- /* Bring downstream devices to D0 if they are not already in */
if (child->parent == pp->bridge->bus) {
- root_bus = child;
+ root_port_bus = child;
break;
}
}
- if (!root_bus) {
- dev_err(pcie->dev, "Failed to find downstream devices\n");
+ if (!root_port_bus) {
+ dev_err(pcie->dev, "Failed to find downstream bus of Root Port\n");
return;
}
- list_for_each_entry(pdev, &root_bus->devices, bus_list) {
+ /* Bring downstream devices to D0 if they are not already in */
+ list_for_each_entry(pdev, &root_port_bus->devices, bus_list) {
if (PCI_SLOT(pdev->devfn) == 0) {
if (pci_set_power_state(pdev, PCI_D0))
dev_err(pcie->dev,
@@ -1629,7 +1631,6 @@ static void tegra_pcie_deinit_controller(struct tegra_pcie_dw *pcie)
static int tegra_pcie_config_rp(struct tegra_pcie_dw *pcie)
{
struct device *dev = pcie->dev;
- char *name;
int ret;
pm_runtime_enable(dev);
@@ -1659,13 +1660,6 @@ static int tegra_pcie_config_rp(struct tegra_pcie_dw *pcie)
goto fail_host_init;
}
- name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node);
- if (!name) {
- ret = -ENOMEM;
- goto fail_host_init;
- }
-
- pcie->debugfs = debugfs_create_dir(name, NULL);
init_debugfs(pcie);
return ret;
@@ -1716,9 +1710,9 @@ static void pex_ep_event_pex_rst_assert(struct tegra_pcie_dw *pcie)
ret);
}
- ret = tegra_pcie_bpmp_set_pll_state(pcie, false);
+ ret = tegra_pcie_bpmp_set_ctrl_state(pcie, false);
if (ret)
- dev_err(pcie->dev, "Failed to turn off UPHY: %d\n", ret);
+ dev_err(pcie->dev, "Failed to disable controller: %d\n", ret);
pcie->ep_state = EP_STATE_DISABLED;
dev_dbg(pcie->dev, "Uninitialization of endpoint is completed\n");
@@ -1777,6 +1771,10 @@ static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie)
goto fail_phy;
}
+ /* Perform cleanup that requires refclk */
+ pci_epc_deinit_notify(pcie->pci.ep.epc);
+ dw_pcie_ep_cleanup(&pcie->pci.ep);
+
/* Clear any stale interrupt statuses */
appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0);
@@ -1847,12 +1845,6 @@ static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie)
init_host_aspm(pcie);
- /* Disable ASPM-L1SS advertisement if there is no CLKREQ routing */
- if (!pcie->supports_clkreq) {
- disable_aspm_l11(pcie);
- disable_aspm_l12(pcie);
- }
-
if (!pcie->of_data->has_l1ss_exit_fix) {
val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
@@ -1862,11 +1854,6 @@ static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie)
pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci,
PCI_CAP_ID_EXP);
- val_16 = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_DEVCTL);
- val_16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
- val_16 |= PCI_EXP_DEVCTL_PAYLOAD_256B;
- dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + PCI_EXP_DEVCTL, val_16);
-
/* Clear Slot Clock Configuration bit if SRNS configuration */
if (pcie->enable_srns) {
val_16 = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
@@ -1884,13 +1871,13 @@ static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie)
val = (upper_32_bits(ep->msi_mem_phys) & MSIX_ADDR_MATCH_HIGH_OFF_MASK);
dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_HIGH_OFF, val);
- ret = dw_pcie_ep_init_complete(ep);
+ ret = dw_pcie_ep_init_registers(ep);
if (ret) {
dev_err(dev, "Failed to complete initialization: %d\n", ret);
goto fail_init_complete;
}
- dw_pcie_ep_init_notify(ep);
+ pci_epc_init_notify(ep->epc);
/* Program the private control to allow sending LTR upstream */
if (pcie->of_data->has_ltr_req_fix) {
@@ -1936,7 +1923,16 @@ static irqreturn_t tegra_pcie_ep_pex_rst_irq(int irq, void *arg)
return IRQ_HANDLED;
}
-static int tegra_pcie_ep_raise_legacy_irq(struct tegra_pcie_dw *pcie, u16 irq)
+static void tegra_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ enum pci_barno bar;
+
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
+ dw_pcie_ep_reset_bar(pci, bar);
+}
+
+static int tegra_pcie_ep_raise_intx_irq(struct tegra_pcie_dw *pcie, u16 irq)
{
/* Tegra194 supports only INTA */
if (irq > 1)
@@ -1950,10 +1946,10 @@ static int tegra_pcie_ep_raise_legacy_irq(struct tegra_pcie_dw *pcie, u16 irq)
static int tegra_pcie_ep_raise_msi_irq(struct tegra_pcie_dw *pcie, u16 irq)
{
- if (unlikely(irq > 31))
+ if (unlikely(irq > 32))
return -EINVAL;
- appl_writel(pcie, BIT(irq), APPL_MSI_CTRL_1);
+ appl_writel(pcie, BIT(irq - 1), APPL_MSI_CTRL_1);
return 0;
}
@@ -1968,20 +1964,19 @@ static int tegra_pcie_ep_raise_msix_irq(struct tegra_pcie_dw *pcie, u16 irq)
}
static int tegra_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
- enum pci_epc_irq_type type,
- u16 interrupt_num)
+ unsigned int type, u16 interrupt_num)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
switch (type) {
- case PCI_EPC_IRQ_LEGACY:
- return tegra_pcie_ep_raise_legacy_irq(pcie, interrupt_num);
+ case PCI_IRQ_INTX:
+ return tegra_pcie_ep_raise_intx_irq(pcie, interrupt_num);
- case PCI_EPC_IRQ_MSI:
+ case PCI_IRQ_MSI:
return tegra_pcie_ep_raise_msi_irq(pcie, interrupt_num);
- case PCI_EPC_IRQ_MSIX:
+ case PCI_IRQ_MSIX:
return tegra_pcie_ep_raise_msix_irq(pcie, interrupt_num);
default:
@@ -1994,12 +1989,15 @@ static int tegra_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
static const struct pci_epc_features tegra_pcie_epc_features = {
.linkup_notifier = true,
- .core_init_notifier = true,
- .msi_capable = false,
- .msix_capable = false,
- .reserved_bar = 1 << BAR_2 | 1 << BAR_3 | 1 << BAR_4 | 1 << BAR_5,
- .bar_fixed_64bit = 1 << BAR_0,
- .bar_fixed_size[0] = SZ_1M,
+ .msi_capable = true,
+ .bar[BAR_0] = { .type = BAR_FIXED, .fixed_size = SZ_1M,
+ .only_64bit = true, },
+ .bar[BAR_1] = { .type = BAR_RESERVED, },
+ .bar[BAR_2] = { .type = BAR_RESERVED, },
+ .bar[BAR_3] = { .type = BAR_RESERVED, },
+ .bar[BAR_4] = { .type = BAR_RESERVED, },
+ .bar[BAR_5] = { .type = BAR_RESERVED, },
+ .align = SZ_64K,
};
static const struct pci_epc_features*
@@ -2009,6 +2007,7 @@ tegra_pcie_ep_get_features(struct dw_pcie_ep *ep)
}
static const struct dw_pcie_ep_ops pcie_ep_ops = {
+ .init = tegra_pcie_ep_init,
.raise_irq = tegra_pcie_ep_raise_irq,
.get_features = tegra_pcie_ep_get_features,
};
@@ -2219,6 +2218,14 @@ static int tegra_pcie_dw_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pcie);
+ pcie->icc_path = devm_of_icc_get(&pdev->dev, "write");
+ ret = PTR_ERR_OR_ZERO(pcie->icc_path);
+ if (ret) {
+ tegra_bpmp_put(pcie->bpmp);
+ dev_err_probe(&pdev->dev, ret, "failed to get write interconnect\n");
+ return ret;
+ }
+
switch (pcie->of_data->mode) {
case DW_PCIE_RC_TYPE:
ret = devm_request_irq(dev, pp->irq, tegra_pcie_rp_irq_handler,
@@ -2251,11 +2258,14 @@ static int tegra_pcie_dw_probe(struct platform_device *pdev)
ret = tegra_pcie_config_ep(pcie, pdev);
if (ret < 0)
goto fail;
+ else
+ return 0;
break;
default:
dev_err(dev, "Invalid PCIe device type %d\n",
pcie->of_data->mode);
+ ret = -EINVAL;
}
fail:
@@ -2263,13 +2273,13 @@ fail:
return ret;
}
-static int tegra_pcie_dw_remove(struct platform_device *pdev)
+static void tegra_pcie_dw_remove(struct platform_device *pdev)
{
struct tegra_pcie_dw *pcie = platform_get_drvdata(pdev);
if (pcie->of_data->mode == DW_PCIE_RC_TYPE) {
if (!pcie->link_state)
- return 0;
+ return;
debugfs_remove_recursive(pcie->debugfs);
tegra_pcie_deinit_controller(pcie);
@@ -2283,8 +2293,6 @@ static int tegra_pcie_dw_remove(struct platform_device *pdev)
tegra_bpmp_put(pcie->bpmp);
if (pcie->pex_refclk_sel_gpiod)
gpiod_set_value(pcie->pex_refclk_sel_gpiod, 0);
-
- return 0;
}
static int tegra_pcie_dw_suspend_late(struct device *dev)
diff --git a/drivers/pci/controller/dwc/pcie-uniphier-ep.c b/drivers/pci/controller/dwc/pcie-uniphier-ep.c
index 4d0a587c0ba5..d6e73811216e 100644
--- a/drivers/pci/controller/dwc/pcie-uniphier-ep.c
+++ b/drivers/pci/controller/dwc/pcie-uniphier-ep.c
@@ -11,7 +11,7 @@
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/iopoll.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
@@ -212,7 +212,7 @@ static void uniphier_pcie_ep_init(struct dw_pcie_ep *ep)
dw_pcie_ep_reset_bar(pci, bar);
}
-static int uniphier_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep)
+static int uniphier_pcie_ep_raise_intx_irq(struct dw_pcie_ep *ep)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
struct uniphier_pcie_ep_priv *priv = to_uniphier_pcie(pci);
@@ -256,15 +256,14 @@ static int uniphier_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep,
}
static int uniphier_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
- enum pci_epc_irq_type type,
- u16 interrupt_num)
+ unsigned int type, u16 interrupt_num)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
switch (type) {
- case PCI_EPC_IRQ_LEGACY:
- return uniphier_pcie_ep_raise_legacy_irq(ep);
- case PCI_EPC_IRQ_MSI:
+ case PCI_IRQ_INTX:
+ return uniphier_pcie_ep_raise_intx_irq(ep);
+ case PCI_IRQ_MSI:
return uniphier_pcie_ep_raise_msi_irq(ep, func_no,
interrupt_num);
default:
@@ -284,7 +283,7 @@ uniphier_pcie_get_features(struct dw_pcie_ep *ep)
}
static const struct dw_pcie_ep_ops uniphier_pcie_ep_ops = {
- .ep_init = uniphier_pcie_ep_init,
+ .init = uniphier_pcie_ep_init,
.raise_irq = uniphier_pcie_ep_raise_irq,
.get_features = uniphier_pcie_get_features,
};
@@ -400,7 +399,20 @@ static int uniphier_pcie_ep_probe(struct platform_device *pdev)
return ret;
priv->pci.ep.ops = &uniphier_pcie_ep_ops;
- return dw_pcie_ep_init(&priv->pci.ep);
+ ret = dw_pcie_ep_init(&priv->pci.ep);
+ if (ret)
+ return ret;
+
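+ /* Core setup done; program the EP registers and notify EPF drivers */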
+ ret = dw_pcie_ep_init_registers(&priv->pci.ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DWC endpoint registers\n");
+ dw_pcie_ep_deinit(&priv->pci.ep);
+ return ret;
+ }
+
+ pci_epc_init_notify(priv->pci.ep.epc);
+
+ return 0;
}
static const struct uniphier_pcie_ep_soc_data uniphier_pro5_data = {
@@ -412,8 +424,12 @@ static const struct uniphier_pcie_ep_soc_data uniphier_pro5_data = {
.msi_capable = true,
.msix_capable = false,
.align = 1 << 16,
- .bar_fixed_64bit = BIT(BAR_0) | BIT(BAR_2) | BIT(BAR_4),
- .reserved_bar = BIT(BAR_4),
+ .bar[BAR_0] = { .only_64bit = true, },
+ .bar[BAR_1] = { .type = BAR_RESERVED, },
+ .bar[BAR_2] = { .only_64bit = true, },
+ .bar[BAR_3] = { .type = BAR_RESERVED, },
+ .bar[BAR_4] = { .type = BAR_RESERVED, },
+ .bar[BAR_5] = { .type = BAR_RESERVED, },
},
};
@@ -426,7 +442,12 @@ static const struct uniphier_pcie_ep_soc_data uniphier_nx1_data = {
.msi_capable = true,
.msix_capable = false,
.align = 1 << 12,
- .bar_fixed_64bit = BIT(BAR_0) | BIT(BAR_2) | BIT(BAR_4),
+ .bar[BAR_0] = { .only_64bit = true, },
+ .bar[BAR_1] = { .type = BAR_RESERVED, },
+ .bar[BAR_2] = { .only_64bit = true, },
+ .bar[BAR_3] = { .type = BAR_RESERVED, },
+ .bar[BAR_4] = { .only_64bit = true, },
+ .bar[BAR_5] = { .type = BAR_RESERVED, },
},
};
diff --git a/drivers/pci/controller/dwc/pcie-uniphier.c b/drivers/pci/controller/dwc/pcie-uniphier.c
index 48c3eba817b4..297e7a3d9b36 100644
--- a/drivers/pci/controller/dwc/pcie-uniphier.c
+++ b/drivers/pci/controller/dwc/pcie-uniphier.c
@@ -67,7 +67,7 @@ struct uniphier_pcie {
struct clk *clk;
struct reset_control *rst;
struct phy *phy;
- struct irq_domain *legacy_irq_domain;
+ struct irq_domain *intx_irq_domain;
};
#define to_uniphier_pcie(x) dev_get_drvdata((x)->dev)
@@ -135,7 +135,7 @@ static int uniphier_pcie_wait_rc(struct uniphier_pcie *pcie)
return 0;
}
-static int uniphier_pcie_link_up(struct dw_pcie *pci)
+static bool uniphier_pcie_link_up(struct dw_pcie *pci)
{
struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
u32 val, mask;
@@ -253,12 +253,12 @@ static void uniphier_pcie_irq_handler(struct irq_desc *desc)
reg = FIELD_GET(PCL_RCV_INTX_ALL_STATUS, val);
for_each_set_bit(bit, &reg, PCI_NUM_INTX)
- generic_handle_domain_irq(pcie->legacy_irq_domain, bit);
+ generic_handle_domain_irq(pcie->intx_irq_domain, bit);
chained_irq_exit(chip, desc);
}
-static int uniphier_pcie_config_legacy_irq(struct dw_pcie_rp *pp)
+static int uniphier_pcie_config_intx_irq(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
@@ -279,9 +279,9 @@ static int uniphier_pcie_config_legacy_irq(struct dw_pcie_rp *pp)
goto out_put_node;
}
- pcie->legacy_irq_domain = irq_domain_add_linear(np_intc, PCI_NUM_INTX,
+ pcie->intx_irq_domain = irq_domain_create_linear(of_fwnode_handle(np_intc), PCI_NUM_INTX,
&uniphier_intx_domain_ops, pp);
- if (!pcie->legacy_irq_domain) {
+ if (!pcie->intx_irq_domain) {
dev_err(pci->dev, "Failed to get INTx domain\n");
ret = -ENODEV;
goto out_put_node;
@@ -301,7 +301,7 @@ static int uniphier_pcie_host_init(struct dw_pcie_rp *pp)
struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
int ret;
- ret = uniphier_pcie_config_legacy_irq(pp);
+ ret = uniphier_pcie_config_intx_irq(pp);
if (ret)
return ret;
@@ -311,7 +311,7 @@ static int uniphier_pcie_host_init(struct dw_pcie_rp *pp)
}
static const struct dw_pcie_host_ops uniphier_pcie_host_ops = {
- .host_init = uniphier_pcie_host_init,
+ .init = uniphier_pcie_host_init,
};
static int uniphier_pcie_host_enable(struct uniphier_pcie *pcie)
diff --git a/drivers/pci/controller/dwc/pcie-visconti.c b/drivers/pci/controller/dwc/pcie-visconti.c
index 71026fefa366..cdeac6177143 100644
--- a/drivers/pci/controller/dwc/pcie-visconti.c
+++ b/drivers/pci/controller/dwc/pcie-visconti.c
@@ -121,13 +121,13 @@ static u32 visconti_mpu_readl(struct visconti_pcie *pcie, u32 reg)
return readl_relaxed(pcie->mpu_base + reg);
}
-static int visconti_pcie_link_up(struct dw_pcie *pci)
+static bool visconti_pcie_link_up(struct dw_pcie *pci)
{
struct visconti_pcie *pcie = dev_get_drvdata(pci->dev);
void __iomem *addr = pcie->ulreg_base;
u32 val = readl_relaxed(addr + PCIE_UL_REG_V_PHY_ST_02);
- return !!(val & PCIE_UL_S_L0);
+ return val & PCIE_UL_S_L0;
}
static int visconti_pcie_start_link(struct dw_pcie *pci)
@@ -236,7 +236,7 @@ static int visconti_pcie_host_init(struct dw_pcie_rp *pp)
}
static const struct dw_pcie_host_ops visconti_pcie_host_ops = {
- .host_init = visconti_pcie_host_init,
+ .init = visconti_pcie_host_init,
};
static int visconti_get_resources(struct platform_device *pdev,
diff --git a/drivers/pci/controller/mobiveil/Kconfig b/drivers/pci/controller/mobiveil/Kconfig
index 1d7a07ba9ccd..c50c4625937f 100644
--- a/drivers/pci/controller/mobiveil/Kconfig
+++ b/drivers/pci/controller/mobiveil/Kconfig
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
-menu "Mobiveil PCIe Core Support"
+menu "Mobiveil-based PCIe controllers"
depends on PCI
config PCIE_MOBIVEIL
@@ -9,8 +9,18 @@ config PCIE_MOBIVEIL
config PCIE_MOBIVEIL_HOST
bool
depends on PCI_MSI
+ select IRQ_MSI_LIB
select PCIE_MOBIVEIL
+config PCIE_LAYERSCAPE_GEN4
+ bool "Freescale Layerscape Gen4 PCIe controller"
+ depends on ARCH_LAYERSCAPE || COMPILE_TEST
+ depends on PCI_MSI
+ select PCIE_MOBIVEIL_HOST
+ help
+ Say Y here if you want PCIe Gen4 controller support on
+ Layerscape SoCs.
+
config PCIE_MOBIVEIL_PLAT
bool "Mobiveil AXI PCIe controller"
depends on ARCH_ZYNQMP || COMPILE_TEST
@@ -22,12 +32,4 @@ config PCIE_MOBIVEIL_PLAT
Soft IP. It has up to 8 outbound and inbound windows
for address translation and it is a PCIe Gen4 IP.
-config PCIE_LAYERSCAPE_GEN4
- bool "Freescale Layerscape PCIe Gen4 controller"
- depends on ARCH_LAYERSCAPE || COMPILE_TEST
- depends on PCI_MSI
- select PCIE_MOBIVEIL_HOST
- help
- Say Y here if you want PCIe Gen4 controller support on
- Layerscape SoCs.
endmenu
diff --git a/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c b/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c
index d7b7350f02dd..4919b27eaf44 100644
--- a/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c
+++ b/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c
@@ -53,18 +53,13 @@ static inline void ls_g4_pcie_pf_writel(struct ls_g4_pcie *pcie,
iowrite32(val, pcie->pci.csr_axi_slave_base + PCIE_PF_OFF + off);
}
-static int ls_g4_pcie_link_up(struct mobiveil_pcie *pci)
+static bool ls_g4_pcie_link_up(struct mobiveil_pcie *pci)
{
struct ls_g4_pcie *pcie = to_ls_g4_pcie(pci);
u32 state;
state = ls_g4_pcie_pf_readl(pcie, PCIE_PF_DBG);
- state = state & PF_DBG_LTSSM_MASK;
-
- if (state == PF_DBG_LTSSM_L0)
- return 1;
-
- return 0;
+ return (state & PF_DBG_LTSSM_MASK) == PF_DBG_LTSSM_L0;
}
static void ls_g4_pcie_disable_interrupt(struct ls_g4_pcie *pcie)
@@ -174,8 +169,7 @@ static int ls_g4_pcie_interrupt_init(struct mobiveil_pcie *mv_pci)
static void ls_g4_pcie_reset(struct work_struct *work)
{
- struct delayed_work *dwork = container_of(work, struct delayed_work,
- work);
+ struct delayed_work *dwork = to_delayed_work(work);
struct ls_g4_pcie *pcie = container_of(dwork, struct ls_g4_pcie, dwork);
struct mobiveil_pcie *mv_pci = &pcie->pci;
u16 ctrl;
@@ -190,7 +184,7 @@ static void ls_g4_pcie_reset(struct work_struct *work)
ls_g4_pcie_enable_interrupt(pcie);
}
-static struct mobiveil_rp_ops ls_g4_pcie_rp_ops = {
+static const struct mobiveil_rp_ops ls_g4_pcie_rp_ops = {
.interrupt_init = ls_g4_pcie_interrupt_init,
};
diff --git a/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
index 31a7bdebe540..dbc72c73fd0a 100644
--- a/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
+++ b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
@@ -12,14 +12,12 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/msi.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/of_platform.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
@@ -356,16 +354,19 @@ static const struct irq_domain_ops intx_domain_ops = {
.map = mobiveil_pcie_intx_map,
};
-static struct irq_chip mobiveil_msi_irq_chip = {
- .name = "Mobiveil PCIe MSI",
- .irq_mask = pci_msi_mask_irq,
- .irq_unmask = pci_msi_unmask_irq,
-};
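+/* Parent-domain MSI flags: default ops, MSI-X supported, no IRQ affinity */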
+#define MOBIVEIL_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_NO_AFFINITY)
+
+#define MOBIVEIL_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
+ MSI_FLAG_PCI_MSIX)
-static struct msi_domain_info mobiveil_msi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_PCI_MSIX),
- .chip = &mobiveil_msi_irq_chip,
+static const struct msi_parent_ops mobiveil_msi_parent_ops = {
+ .required_flags = MOBIVEIL_MSI_FLAGS_REQUIRED,
+ .supported_flags = MOBIVEIL_MSI_FLAGS_SUPPORTED,
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .prefix = "Mobiveil-",
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
};
static void mobiveil_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
@@ -381,16 +382,9 @@ static void mobiveil_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
(int)data->hwirq, msg->address_hi, msg->address_lo);
}
-static int mobiveil_msi_set_affinity(struct irq_data *irq_data,
- const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
static struct irq_chip mobiveil_msi_bottom_irq_chip = {
.name = "Mobiveil MSI",
.irq_compose_msi_msg = mobiveil_compose_msi_msg,
- .irq_set_affinity = mobiveil_msi_set_affinity,
};
static int mobiveil_irq_msi_domain_alloc(struct irq_domain *domain,
@@ -445,23 +439,20 @@ static const struct irq_domain_ops msi_domain_ops = {
static int mobiveil_allocate_msi_domains(struct mobiveil_pcie *pcie)
{
struct device *dev = &pcie->pdev->dev;
- struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
struct mobiveil_msi *msi = &pcie->rp.msi;
mutex_init(&msi->lock);
- msi->dev_domain = irq_domain_add_linear(NULL, msi->num_of_vectors,
- &msi_domain_ops, pcie);
- if (!msi->dev_domain) {
- dev_err(dev, "failed to create IRQ domain\n");
- return -ENOMEM;
- }
- msi->msi_domain = pci_msi_create_irq_domain(fwnode,
- &mobiveil_msi_domain_info,
- msi->dev_domain);
- if (!msi->msi_domain) {
+ struct irq_domain_info info = {
+ .fwnode = dev_fwnode(dev),
+ .ops = &msi_domain_ops,
+ .host_data = pcie,
+ .size = msi->num_of_vectors,
+ };
+
+ msi->dev_domain = msi_create_parent_irq_domain(&info, &mobiveil_msi_parent_ops);
+ if (!msi->dev_domain) {
dev_err(dev, "failed to create MSI domain\n");
- irq_domain_remove(msi->dev_domain);
return -ENOMEM;
}
@@ -471,13 +462,11 @@ static int mobiveil_allocate_msi_domains(struct mobiveil_pcie *pcie)
static int mobiveil_pcie_init_irq_domain(struct mobiveil_pcie *pcie)
{
struct device *dev = &pcie->pdev->dev;
- struct device_node *node = dev->of_node;
struct mobiveil_root_port *rp = &pcie->rp;
/* setup INTx */
- rp->intx_domain = irq_domain_add_linear(node, PCI_NUM_INTX,
- &intx_domain_ops, pcie);
-
+ rp->intx_domain = irq_domain_create_linear(dev_fwnode(dev), PCI_NUM_INTX, &intx_domain_ops,
+ pcie);
if (!rp->intx_domain) {
dev_err(dev, "Failed to get a INTx IRQ domain\n");
return -ENOMEM;
@@ -542,7 +531,7 @@ static bool mobiveil_pcie_is_bridge(struct mobiveil_pcie *pcie)
u32 header_type;
header_type = mobiveil_csr_readb(pcie, PCI_HEADER_TYPE);
- header_type &= 0x7f;
+ header_type &= PCI_HEADER_TYPE_MASK;
return header_type == PCI_HEADER_TYPE_BRIDGE;
}
diff --git a/drivers/pci/controller/mobiveil/pcie-mobiveil-plat.c b/drivers/pci/controller/mobiveil/pcie-mobiveil-plat.c
index f6fcd95c2bf5..c5bb87ff6d9a 100644
--- a/drivers/pci/controller/mobiveil/pcie-mobiveil-plat.c
+++ b/drivers/pci/controller/mobiveil/pcie-mobiveil-plat.c
@@ -56,6 +56,5 @@ static struct platform_driver mobiveil_pcie_driver = {
builtin_platform_driver(mobiveil_pcie_driver);
-MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Mobiveil PCIe host controller driver");
MODULE_AUTHOR("Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>");
diff --git a/drivers/pci/controller/mobiveil/pcie-mobiveil.h b/drivers/pci/controller/mobiveil/pcie-mobiveil.h
index 6082b8afbc31..7246de6a7176 100644
--- a/drivers/pci/controller/mobiveil/pcie-mobiveil.h
+++ b/drivers/pci/controller/mobiveil/pcie-mobiveil.h
@@ -135,7 +135,6 @@
struct mobiveil_msi { /* MSI information */
struct mutex lock; /* protect bitmap variable */
- struct irq_domain *msi_domain;
struct irq_domain *dev_domain;
phys_addr_t msi_pages_phys;
int num_of_vectors;
@@ -151,7 +150,7 @@ struct mobiveil_rp_ops {
struct mobiveil_root_port {
void __iomem *config_axi_slave_base; /* endpoint config base */
struct resource *ob_io_res;
- struct mobiveil_rp_ops *ops;
+ const struct mobiveil_rp_ops *ops;
int irq;
raw_spinlock_t intx_mask_lock;
struct irq_domain *intx_domain;
@@ -160,7 +159,7 @@ struct mobiveil_root_port {
};
struct mobiveil_pab_ops {
- int (*link_up)(struct mobiveil_pcie *pcie);
+ bool (*link_up)(struct mobiveil_pcie *pcie);
};
struct mobiveil_pcie {
diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
index 513d8edf3a5c..e34bea1ff0ac 100644
--- a/drivers/pci/controller/pci-aardvark.c
+++ b/drivers/pci/controller/pci-aardvark.c
@@ -13,6 +13,7 @@
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -23,7 +24,6 @@
#include <linux/platform_device.h>
#include <linux/msi.h>
#include <linux/of_address.h>
-#include <linux/of_gpio.h>
#include <linux/of_pci.h>
#include "../pci.h"
@@ -51,7 +51,7 @@
#define PIO_COMPLETION_STATUS_MASK GENMASK(9, 7)
#define PIO_COMPLETION_STATUS_OK 0
#define PIO_COMPLETION_STATUS_UR 1
-#define PIO_COMPLETION_STATUS_CRS 2
+#define PIO_COMPLETION_STATUS_RRS 2
#define PIO_COMPLETION_STATUS_CA 4
#define PIO_NON_POSTED_REQ BIT(10)
#define PIO_ERR_STATUS BIT(11)
@@ -263,7 +263,7 @@ enum {
#define MSI_IRQ_NUM 32
-#define CFG_RD_CRS_VAL 0xffff0001
+#define CFG_RD_RRS_VAL 0xffff0001
struct advk_pcie {
struct platform_device *pdev;
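
The renamed CFG_RD_RRS_VAL is not an arbitrary constant: as the spec text quoted further down explains, a Root Complex with RRS Software Visibility enabled completes the Vendor ID read with the reserved PCI-SIG vendor ID 0x0001 and all 1's for any additional bytes, hence 0xffff0001 for a 32-bit read. The PCI core's probe path recognizes the same pattern when polling a device after reset; the check amounts to:

#include <linux/pci_ids.h>

static bool cfg_read_is_rrs(u32 val)
{
	/* Low 16 bits carry the reserved Vendor ID 0x0001, rest reads as 1's */
	return (val & 0xffff) == PCI_VENDOR_ID_PCI_SIG;
}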
@@ -279,7 +279,6 @@ struct advk_pcie {
struct irq_domain *irq_domain;
struct irq_chip irq_chip;
raw_spinlock_t irq_lock;
- struct irq_domain *msi_domain;
struct irq_domain *msi_inner_domain;
raw_spinlock_t msi_irq_lock;
DECLARE_BITMAP(msi_used, MSI_IRQ_NUM);
@@ -650,7 +649,7 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
advk_pcie_train_link(pcie);
}
-static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u32 *val)
+static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_rrs, u32 *val)
{
struct device *dev = &pcie->pdev->dev;
u32 reg;
@@ -670,7 +669,7 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u3
* 2) value Unsupported Request(1) of COMPLETION_STATUS(bit9:7) only
* means a PIO write error, and for PIO read it is successful with
* a read value of 0xFFFFFFFF.
- * 3) value Completion Retry Status(CRS) of COMPLETION_STATUS(bit9:7)
+ * 3) value Config Request Retry Status(RRS) of COMPLETION_STATUS(bit9:7)
* only means a PIO write error, and for PIO read it is successful
* with a read value of 0xFFFF0001.
* 4) value Completer Abort (CA) of COMPLETION_STATUS(bit9:7) means
@@ -695,10 +694,10 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u3
strcomp_status = "UR";
ret = -EOPNOTSUPP;
break;
- case PIO_COMPLETION_STATUS_CRS:
- if (allow_crs && val) {
- /* PCIe r4.0, sec 2.3.2, says:
- * If CRS Software Visibility is enabled:
+ case PIO_COMPLETION_STATUS_RRS:
+ if (allow_rrs && val) {
+ /* PCIe r6.0, sec 2.3.2, says:
+ * If Configuration RRS Software Visibility is enabled:
* For a Configuration Read Request that includes both
* bytes of the Vendor ID field of a device Function's
* Configuration Space Header, the Root Complex must
@@ -707,22 +706,22 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u3
* all '1's for any additional bytes included in the
* request.
*
- * So CRS in this case is not an error status.
+ * So RRS in this case is not an error status.
*/
- *val = CFG_RD_CRS_VAL;
+ *val = CFG_RD_RRS_VAL;
strcomp_status = NULL;
ret = 0;
break;
}
- /* PCIe r4.0, sec 2.3.2, says:
- * If CRS Software Visibility is not enabled, the Root Complex
+ /* PCIe r6.0, sec 2.3.2, says:
+ * If RRS Software Visibility is not enabled, the Root Complex
* must re-issue the Configuration Request as a new Request.
- * If CRS Software Visibility is enabled: For a Configuration
+ * If RRS Software Visibility is enabled: For a Configuration
* Write Request or for any other Configuration Read Request,
* the Root Complex must re-issue the Configuration Request as
* a new Request.
* A Root Complex implementation may choose to limit the number
- * of Configuration Request/CRS Completion Status loops before
+ * of Configuration Request/RRS Completion Status loops before
* determining that something is wrong with the target of the
* Request and taking appropriate action, e.g., complete the
* Request to the host as a failed transaction.
@@ -730,7 +729,7 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u3
* So return -EAGAIN and caller (pci-aardvark.c driver) will
* re-issue request again up to the PIO_RETRY_CNT retries.
*/
- strcomp_status = "CRS";
+ strcomp_status = "RRS";
ret = -EAGAIN;
break;
case PIO_COMPLETION_STATUS_CA:
@@ -921,8 +920,8 @@ advk_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
case PCI_EXP_RTCTL: {
u16 rootctl = le16_to_cpu(bridge->pcie_conf.rootctl);
- /* Only emulation of PMEIE and CRSSVE bits is provided */
- rootctl &= PCI_EXP_RTCTL_PMEIE | PCI_EXP_RTCTL_CRSSVE;
+ /* Only emulation of PMEIE and RRS_SVE bits is provided */
+ rootctl &= PCI_EXP_RTCTL_PMEIE | PCI_EXP_RTCTL_RRS_SVE;
bridge->pcie_conf.rootctl = cpu_to_le16(rootctl);
break;
}
@@ -1076,7 +1075,7 @@ static int advk_sw_pci_bridge_init(struct advk_pcie *pcie)
bridge->pcie_conf.slotsta = cpu_to_le16(PCI_EXP_SLTSTA_PDS);
/* Indicates support for Configuration Request Retry Status */
- bridge->pcie_conf.rootcap = cpu_to_le16(PCI_EXP_RTCAP_CRSVIS);
+ bridge->pcie_conf.rootcap = cpu_to_le16(PCI_EXP_RTCAP_RRS_SV);
bridge->subsystem_vendor_id = advk_readl(pcie, PCIE_CORE_SSDEV_ID_REG) & 0xffff;
bridge->subsystem_id = advk_readl(pcie, PCIE_CORE_SSDEV_ID_REG) >> 16;
@@ -1142,7 +1141,7 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
{
struct advk_pcie *pcie = bus->sysdata;
int retry_count;
- bool allow_crs;
+ bool allow_rrs;
u32 reg;
int ret;
@@ -1154,16 +1153,16 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
size, val);
/*
- * Completion Retry Status is possible to return only when reading all
- * 4 bytes from PCI_VENDOR_ID and PCI_DEVICE_ID registers at once and
- * CRSSVE flag on Root Bridge is enabled.
+	 * Configuration Request Retry Status (RRS) can be returned only
+	 * when both bytes of PCI_VENDOR_ID are read at once and the
+	 * RRS_SVE flag on the Root Port is enabled.
*/
- allow_crs = (where == PCI_VENDOR_ID) && (size == 4) &&
+ allow_rrs = (where == PCI_VENDOR_ID) && (size >= 2) &&
(le16_to_cpu(pcie->bridge.pcie_conf.rootctl) &
- PCI_EXP_RTCTL_CRSSVE);
+ PCI_EXP_RTCTL_RRS_SVE);
if (advk_pcie_pio_is_running(pcie))
- goto try_crs;
+ goto try_rrs;
/* Program the control register */
reg = advk_readl(pcie, PIO_CTRL);
@@ -1190,12 +1189,12 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
ret = advk_pcie_wait_pio(pcie);
if (ret < 0)
- goto try_crs;
+ goto try_rrs;
retry_count += ret;
/* Check PIO status and get the read result */
- ret = advk_pcie_check_pio_status(pcie, allow_crs, val);
+ ret = advk_pcie_check_pio_status(pcie, allow_rrs, val);
} while (ret == -EAGAIN && retry_count < PIO_RETRY_CNT);
if (ret < 0)
@@ -1208,13 +1207,13 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
return PCIBIOS_SUCCESSFUL;
-try_crs:
+try_rrs:
/*
- * If it is possible, return Completion Retry Status so that caller
- * tries to issue the request again instead of failing.
+	 * If it is possible, return Configuration Request Retry Status so
+	 * that the caller tries to issue the request again instead of failing.
*/
- if (allow_crs) {
- *val = CFG_RD_CRS_VAL;
+ if (allow_rrs) {
+ *val = CFG_RD_RRS_VAL;
return PCIBIOS_SUCCESSFUL;
}
@@ -1305,12 +1304,6 @@ static void advk_msi_irq_compose_msi_msg(struct irq_data *data,
msg->data = data->hwirq;
}
-static int advk_msi_set_affinity(struct irq_data *irq_data,
- const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
static void advk_msi_irq_mask(struct irq_data *d)
{
struct advk_pcie *pcie = d->domain->host_data;
@@ -1339,22 +1332,9 @@ static void advk_msi_irq_unmask(struct irq_data *d)
raw_spin_unlock_irqrestore(&pcie->msi_irq_lock, flags);
}
-static void advk_msi_top_irq_mask(struct irq_data *d)
-{
- pci_msi_mask_irq(d);
- irq_chip_mask_parent(d);
-}
-
-static void advk_msi_top_irq_unmask(struct irq_data *d)
-{
- pci_msi_unmask_irq(d);
- irq_chip_unmask_parent(d);
-}
-
static struct irq_chip advk_msi_bottom_irq_chip = {
.name = "MSI",
.irq_compose_msi_msg = advk_msi_irq_compose_msi_msg,
- .irq_set_affinity = advk_msi_set_affinity,
.irq_mask = advk_msi_irq_mask,
.irq_unmask = advk_msi_irq_unmask,
};
@@ -1444,16 +1424,20 @@ static const struct irq_domain_ops advk_pcie_irq_domain_ops = {
.xlate = irq_domain_xlate_onecell,
};
-static struct irq_chip advk_msi_irq_chip = {
- .name = "advk-MSI",
- .irq_mask = advk_msi_top_irq_mask,
- .irq_unmask = advk_msi_top_irq_unmask,
-};
-
-static struct msi_domain_info advk_msi_domain_info = {
- .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX,
- .chip = &advk_msi_irq_chip,
+#define ADVK_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_PCI_MSI_MASK_PARENT | \
+ MSI_FLAG_NO_AFFINITY)
+#define ADVK_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
+ MSI_FLAG_PCI_MSIX | \
+ MSI_FLAG_MULTI_PCI_MSI)
+
+static const struct msi_parent_ops advk_msi_parent_ops = {
+ .required_flags = ADVK_MSI_FLAGS_REQUIRED,
+ .supported_flags = ADVK_MSI_FLAGS_SUPPORTED,
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .prefix = "advk-",
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
};
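
With MSI_FLAG_PCI_MSI_MASK_PARENT in required_flags, the generic per-device domain masks the parent chip itself, which is exactly what the deleted advk_msi_top_irq_mask()/advk_msi_top_irq_unmask() pair open-coded:

static void advk_msi_top_irq_mask(struct irq_data *d)	/* now implied */
{
	pci_msi_mask_irq(d);
	irq_chip_mask_parent(d);
}

MSI_FLAG_NO_AFFINITY likewise replaces the advk_msi_set_affinity() stub that only returned -EINVAL, and supported_flags caps what child domains may request (here MSI-X and multi-MSI on top of the generic flags).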
static int advk_pcie_init_msi_irq_domain(struct advk_pcie *pcie)
@@ -1463,27 +1447,22 @@ static int advk_pcie_init_msi_irq_domain(struct advk_pcie *pcie)
raw_spin_lock_init(&pcie->msi_irq_lock);
mutex_init(&pcie->msi_used_lock);
- pcie->msi_inner_domain =
- irq_domain_add_linear(NULL, MSI_IRQ_NUM,
- &advk_msi_domain_ops, pcie);
- if (!pcie->msi_inner_domain)
- return -ENOMEM;
+ struct irq_domain_info info = {
+ .fwnode = dev_fwnode(dev),
+ .ops = &advk_msi_domain_ops,
+ .host_data = pcie,
+ .size = MSI_IRQ_NUM,
+ };
- pcie->msi_domain =
- pci_msi_create_irq_domain(dev_fwnode(dev),
- &advk_msi_domain_info,
- pcie->msi_inner_domain);
- if (!pcie->msi_domain) {
- irq_domain_remove(pcie->msi_inner_domain);
+ pcie->msi_inner_domain = msi_create_parent_irq_domain(&info, &advk_msi_parent_ops);
+ if (!pcie->msi_inner_domain)
return -ENOMEM;
- }
return 0;
}
static void advk_pcie_remove_msi_irq_domain(struct advk_pcie *pcie)
{
- irq_domain_remove(pcie->msi_domain);
irq_domain_remove(pcie->msi_inner_domain);
}
@@ -1515,9 +1494,8 @@ static int advk_pcie_init_irq_domain(struct advk_pcie *pcie)
irq_chip->irq_mask = advk_pcie_irq_mask;
irq_chip->irq_unmask = advk_pcie_irq_unmask;
- pcie->irq_domain =
- irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
- &advk_pcie_irq_domain_ops, pcie);
+ pcie->irq_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), PCI_NUM_INTX,
+ &advk_pcie_irq_domain_ops, pcie);
if (!pcie->irq_domain) {
dev_err(dev, "Failed to get a INTx IRQ domain\n");
ret = -ENOMEM;
@@ -1556,9 +1534,7 @@ static const struct irq_domain_ops advk_pcie_rp_irq_domain_ops = {
static int advk_pcie_init_rp_irq_domain(struct advk_pcie *pcie)
{
- pcie->rp_irq_domain = irq_domain_add_linear(NULL, 1,
- &advk_pcie_rp_irq_domain_ops,
- pcie);
+ pcie->rp_irq_domain = irq_domain_create_linear(NULL, 1, &advk_pcie_rp_irq_domain_ops, pcie);
if (!pcie->rp_irq_domain) {
dev_err(&pcie->pdev->dev, "Failed to add Root Port IRQ domain\n");
return -ENOMEM;
@@ -1927,7 +1903,7 @@ static int advk_pcie_probe(struct platform_device *pdev)
return 0;
}
-static int advk_pcie_remove(struct platform_device *pdev)
+static void advk_pcie_remove(struct platform_device *pdev)
{
struct advk_pcie *pcie = platform_get_drvdata(pdev);
struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
@@ -1989,8 +1965,6 @@ static int advk_pcie_remove(struct platform_device *pdev)
/* Disable phy */
advk_pcie_disable_phy(pcie);
-
- return 0;
}
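
The prototype change is part of the tree-wide conversion of platform driver remove callbacks to return void: the driver core ignored the return value and removed the device regardless, so returning an error only ever produced a warning. The converted shape, as a generic sketch:

static void foo_remove(struct platform_device *pdev)
{
	struct foo_priv *priv = platform_get_drvdata(pdev);

	foo_teardown(priv);	/* no status to hand back to the core */
}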
static const struct of_device_id advk_pcie_of_match_table[] = {
diff --git a/drivers/pci/controller/pci-ftpci100.c b/drivers/pci/controller/pci-ftpci100.c
index ecd3009df586..28e43831c0f1 100644
--- a/drivers/pci/controller/pci-ftpci100.c
+++ b/drivers/pci/controller/pci-ftpci100.c
@@ -15,8 +15,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
@@ -346,8 +345,8 @@ static int faraday_pci_setup_cascaded_irq(struct faraday_pci *p)
return irq ?: -EINVAL;
}
- p->irqdomain = irq_domain_add_linear(intc, PCI_NUM_INTX,
- &faraday_pci_irqdomain_ops, p);
+ p->irqdomain = irq_domain_create_linear(of_fwnode_handle(intc), PCI_NUM_INTX,
+ &faraday_pci_irqdomain_ops, p);
of_node_put(intc);
if (!p->irqdomain) {
dev_err(p->dev, "failed to create Gemini PCI IRQ domain\n");
@@ -429,22 +428,12 @@ static int faraday_pci_probe(struct platform_device *pdev)
p->dev = dev;
/* Retrieve and enable optional clocks */
- clk = devm_clk_get(dev, "PCLK");
+ clk = devm_clk_get_enabled(dev, "PCLK");
if (IS_ERR(clk))
return PTR_ERR(clk);
- ret = clk_prepare_enable(clk);
- if (ret) {
- dev_err(dev, "could not prepare PCLK\n");
- return ret;
- }
- p->bus_clk = devm_clk_get(dev, "PCICLK");
+ p->bus_clk = devm_clk_get_enabled(dev, "PCICLK");
if (IS_ERR(p->bus_clk))
return PTR_ERR(p->bus_clk);
- ret = clk_prepare_enable(p->bus_clk);
- if (ret) {
- dev_err(dev, "could not prepare PCICLK\n");
- return ret;
- }
p->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(p->base))
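
devm_clk_get_enabled() folds the get/prepare/enable sequence into one devres-managed call and also registers an action that disables and unprepares the clock on detach, which is why both error branches can go. Conceptually (error paths elided, not the real implementation) the helper behaves like:

static struct clk *foo_clk_get_enabled(struct device *dev, const char *id)
{
	struct clk *clk = devm_clk_get(dev, id);
	int ret;

	if (IS_ERR(clk))
		return clk;

	ret = clk_prepare_enable(clk);
	if (ret)
		return ERR_PTR(ret);

	/* ...plus a devres action that calls clk_disable_unprepare() */
	return clk;
}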
diff --git a/drivers/pci/controller/pci-host-common.c b/drivers/pci/controller/pci-host-common.c
index d3924a44db02..c473e7c03bac 100644
--- a/drivers/pci/controller/pci-host-common.c
+++ b/drivers/pci/controller/pci-host-common.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Generic PCI host driver common code
+ * Common library for PCI host controller drivers
*
* Copyright (C) 2014 ARM Limited
*
@@ -9,18 +9,20 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/of_pci.h>
#include <linux/pci-ecam.h>
#include <linux/platform_device.h>
+#include "pci-host-common.h"
+
static void gen_pci_unmap_cfg(void *ptr)
{
pci_ecam_free((struct pci_config_window *)ptr);
}
-static struct pci_config_window *gen_pci_init(struct device *dev,
+struct pci_config_window *pci_host_common_ecam_create(struct device *dev,
struct pci_host_bridge *bridge, const struct pci_ecam_ops *ops)
{
int err;
@@ -48,44 +50,52 @@ static struct pci_config_window *gen_pci_init(struct device *dev,
return cfg;
}
+EXPORT_SYMBOL_GPL(pci_host_common_ecam_create);
-int pci_host_common_probe(struct platform_device *pdev)
+int pci_host_common_init(struct platform_device *pdev,
+ struct pci_host_bridge *bridge,
+ const struct pci_ecam_ops *ops)
{
struct device *dev = &pdev->dev;
- struct pci_host_bridge *bridge;
struct pci_config_window *cfg;
- const struct pci_ecam_ops *ops;
-
- ops = of_device_get_match_data(&pdev->dev);
- if (!ops)
- return -ENODEV;
- bridge = devm_pci_alloc_host_bridge(dev, 0);
- if (!bridge)
- return -ENOMEM;
+ of_pci_check_probe_only();
platform_set_drvdata(pdev, bridge);
- of_pci_check_probe_only();
-
/* Parse and map our Configuration Space windows */
- cfg = gen_pci_init(dev, bridge, ops);
+ cfg = pci_host_common_ecam_create(dev, bridge, ops);
if (IS_ERR(cfg))
return PTR_ERR(cfg);
- /* Do not reassign resources if probe only */
- if (!pci_has_flag(PCI_PROBE_ONLY))
- pci_add_flags(PCI_REASSIGN_ALL_BUS);
-
bridge->sysdata = cfg;
bridge->ops = (struct pci_ops *)&ops->pci_ops;
+ bridge->enable_device = ops->enable_device;
+ bridge->disable_device = ops->disable_device;
bridge->msi_domain = true;
return pci_host_probe(bridge);
}
+EXPORT_SYMBOL_GPL(pci_host_common_init);
+
+int pci_host_common_probe(struct platform_device *pdev)
+{
+ const struct pci_ecam_ops *ops;
+ struct pci_host_bridge *bridge;
+
+ ops = of_device_get_match_data(&pdev->dev);
+ if (!ops)
+ return -ENODEV;
+
+ bridge = devm_pci_alloc_host_bridge(&pdev->dev, 0);
+ if (!bridge)
+ return -ENOMEM;
+
+ return pci_host_common_init(pdev, bridge, ops);
+}
EXPORT_SYMBOL_GPL(pci_host_common_probe);
-int pci_host_common_remove(struct platform_device *pdev)
+void pci_host_common_remove(struct platform_device *pdev)
{
struct pci_host_bridge *bridge = platform_get_drvdata(pdev);
@@ -93,9 +103,8 @@ int pci_host_common_remove(struct platform_device *pdev)
pci_stop_root_bus(bridge->bus);
pci_remove_root_bus(bridge->bus);
pci_unlock_rescan_remove();
-
- return 0;
}
EXPORT_SYMBOL_GPL(pci_host_common_remove);
+MODULE_DESCRIPTION("Common library for PCI host controller drivers");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/pci-host-common.h b/drivers/pci/controller/pci-host-common.h
new file mode 100644
index 000000000000..b5075d4bd7eb
--- /dev/null
+++ b/drivers/pci/controller/pci-host-common.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Common library for PCI host controller drivers
+ *
+ * Copyright (C) 2014 ARM Limited
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ */
+
+#ifndef _PCI_HOST_COMMON_H
+#define _PCI_HOST_COMMON_H
+
+struct pci_ecam_ops;
+
+int pci_host_common_probe(struct platform_device *pdev);
+int pci_host_common_init(struct platform_device *pdev,
+ struct pci_host_bridge *bridge,
+ const struct pci_ecam_ops *ops);
+void pci_host_common_remove(struct platform_device *pdev);
+
+struct pci_config_window *pci_host_common_ecam_create(struct device *dev,
+ struct pci_host_bridge *bridge, const struct pci_ecam_ops *ops);
+#endif
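
The new header turns pci-host-common.c into a small library: simple ECAM users keep the one-call pci_host_common_probe(), while drivers that need to touch the host bridge before scanning allocate it themselves and call pci_host_common_init() directly. A hypothetical caller (foo_* names illustrative):

static int foo_ecam_probe(struct platform_device *pdev)
{
	struct pci_host_bridge *bridge;

	bridge = devm_pci_alloc_host_bridge(&pdev->dev, 0);
	if (!bridge)
		return -ENOMEM;

	/* driver-specific bridge setup goes here, before the bus scan */

	return pci_host_common_init(pdev, bridge, &foo_ecam_pci_ops);
}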
diff --git a/drivers/pci/controller/pci-host-generic.c b/drivers/pci/controller/pci-host-generic.c
index 63865aeb636b..c1bc0d34348f 100644
--- a/drivers/pci/controller/pci-host-generic.c
+++ b/drivers/pci/controller/pci-host-generic.c
@@ -14,6 +14,8 @@
#include <linux/pci-ecam.h>
#include <linux/platform_device.h>
+#include "pci-host-common.h"
+
static const struct pci_ecam_ops gen_pci_cfg_cam_bus_ops = {
.bus_shift = 16,
.pci_ops = {
@@ -86,4 +88,5 @@ static struct platform_driver gen_pci_driver = {
};
module_platform_driver(gen_pci_driver);
+MODULE_DESCRIPTION("Generic PCI host controller driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/pci-hyperv-intf.c b/drivers/pci/controller/pci-hyperv-intf.c
index cc96be450360..28b3e93d31c0 100644
--- a/drivers/pci/controller/pci-hyperv-intf.c
+++ b/drivers/pci/controller/pci-hyperv-intf.c
@@ -14,6 +14,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/hyperv.h>
+#include <linux/export.h>
struct hyperv_pci_block_ops hvpci_block_ops;
EXPORT_SYMBOL_GPL(hvpci_block_ops);
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index 084f5313895c..1e237d3538f9 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -44,11 +44,14 @@
#include <linux/delay.h>
#include <linux/semaphore.h>
#include <linux/irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/msi.h>
#include <linux/hyperv.h>
#include <linux/refcount.h>
#include <linux/irqdomain.h>
#include <linux/acpi.h>
+#include <linux/sizes.h>
+#include <linux/of_irq.h>
#include <asm/mshyperv.h>
/*
@@ -308,8 +311,6 @@ struct pci_packet {
void (*completion_func)(void *context, struct pci_response *resp,
int resp_packet_size);
void *compl_ctxt;
-
- struct pci_message message[];
};
/*
@@ -465,7 +466,7 @@ struct pci_eject_response {
u32 status;
} __packed;
-static int pci_ring_size = (4 * PAGE_SIZE);
+static int pci_ring_size = VMBUS_RING_SIZE(SZ_16K);
/*
* Driver specific state.
@@ -489,7 +490,10 @@ struct hv_pcibus_device {
struct fwnode_handle *fwnode;
/* Protocol version negotiated with the host */
enum pci_protocol_version_t protocol_version;
+
+ struct mutex state_lock;
enum hv_pcibus_state state;
+
struct hv_device *hdev;
resource_size_t low_mmio_space;
resource_size_t high_mmio_space;
@@ -505,22 +509,13 @@ struct hv_pcibus_device {
struct list_head children;
struct list_head dr_list;
- struct msi_domain_info msi_info;
struct irq_domain *irq_domain;
- spinlock_t retarget_msi_interrupt_lock;
-
struct workqueue_struct *wq;
/* Highest slot of child device with resources allocated */
int wslot_res_allocated;
-
- /* hypercall arg, must not cross page boundary */
- struct hv_retarget_device_interrupt retarget_msi_interrupt_params;
-
- /*
- * Don't put anything here: retarget_msi_interrupt_params must be last
- */
+ bool use_calls; /* Use hypercalls to access mmio cfg space */
};
/*
@@ -550,22 +545,13 @@ struct hv_pcidev_description {
struct hv_dr_state {
struct list_head list_entry;
u32 device_count;
- struct hv_pcidev_description func[];
-};
-
-enum hv_pcichild_state {
- hv_pcichild_init = 0,
- hv_pcichild_requirements,
- hv_pcichild_resourced,
- hv_pcichild_ejecting,
- hv_pcichild_maximum
+ struct hv_pcidev_description func[] __counted_by(device_count);
};
struct hv_pci_dev {
/* List protected by pci_rescan_remove_lock */
struct list_head list_entry;
refcount_t refs;
- enum hv_pcichild_state state;
struct pci_slot *pci_slot;
struct hv_pcidev_description desc;
bool reported_missing;
@@ -590,9 +576,8 @@ struct hv_pci_compl {
static void hv_pci_onchannelcallback(void *context);
#ifdef CONFIG_X86
-#define DELIVERY_MODE APIC_DELIVERY_MODE_FIXED
-#define FLOW_HANDLER handle_edge_irq
-#define FLOW_NAME "edge"
+#define DELIVERY_MODE APIC_DELIVERY_MODE_FIXED
+#define HV_MSI_CHIP_FLAGS MSI_CHIP_FLAG_SET_ACK
static int hv_pci_irqchip_init(void)
{
@@ -614,7 +599,7 @@ static unsigned int hv_msi_get_int_vector(struct irq_data *data)
#define hv_msi_prepare pci_msi_prepare
/**
- * hv_arch_irq_unmask() - "Unmask" the IRQ by setting its current
+ * hv_irq_retarget_interrupt() - "Unmask" the IRQ by setting its current
* affinity.
* @data: Describes the IRQ
*
@@ -623,7 +608,7 @@ static unsigned int hv_msi_get_int_vector(struct irq_data *data)
* is built out of this PCI bus's instance GUID and the function
* number of the device.
*/
-static void hv_arch_irq_unmask(struct irq_data *data)
+static void hv_irq_retarget_interrupt(struct irq_data *data)
{
struct msi_desc *msi_desc = irq_data_get_msi_desc(data);
struct hv_retarget_device_interrupt *params;
@@ -643,10 +628,15 @@ static void hv_arch_irq_unmask(struct irq_data *data)
pbus = pdev->bus;
hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);
int_desc = data->chip_data;
+ if (!int_desc) {
+		dev_warn(&hbus->hdev->device, "%s() cannot unmask IRQ %u\n",
+ __func__, data->irq);
+ return;
+ }
- spin_lock_irqsave(&hbus->retarget_msi_interrupt_lock, flags);
+ local_irq_save(flags);
- params = &hbus->retarget_msi_interrupt_params;
+ params = *this_cpu_ptr(hyperv_pcpu_input_arg);
memset(params, 0, sizeof(*params));
params->partition_id = HV_PARTITION_ID_SELF;
params->int_entry.source = HV_INTERRUPT_SOURCE_MSI;
@@ -659,13 +649,6 @@ static void hv_arch_irq_unmask(struct irq_data *data)
PCI_FUNC(pdev->devfn);
params->int_target.vector = hv_msi_get_int_vector(data);
- /*
- * Honoring apic->delivery_mode set to APIC_DELIVERY_MODE_FIXED by
- * setting the HV_DEVICE_INTERRUPT_TARGET_MULTICAST flag results in a
- * spurious interrupt storm. Not doing so does not seem to have a
- * negative effect (yet?).
- */
-
if (hbus->protocol_version >= PCI_PROTOCOL_VERSION_1_2) {
/*
* PCI_PROTOCOL_VERSION_1_2 supports the VP_SET version of the
@@ -679,7 +662,7 @@ static void hv_arch_irq_unmask(struct irq_data *data)
if (!alloc_cpumask_var(&tmp, GFP_ATOMIC)) {
res = 1;
- goto exit_unlock;
+ goto out;
}
cpumask_and(tmp, dest, cpu_online_mask);
@@ -688,7 +671,7 @@ static void hv_arch_irq_unmask(struct irq_data *data)
if (nr_bank <= 0) {
res = 1;
- goto exit_unlock;
+ goto out;
}
/*
@@ -707,8 +690,8 @@ static void hv_arch_irq_unmask(struct irq_data *data)
res = hv_do_hypercall(HVCALL_RETARGET_INTERRUPT | (var_size << 17),
params, NULL);
-exit_unlock:
- spin_unlock_irqrestore(&hbus->retarget_msi_interrupt_lock, flags);
+out:
+ local_irq_restore(flags);
/*
* During hibernation, when a CPU is offlined, the kernel tries
@@ -730,6 +713,20 @@ exit_unlock:
dev_err(&hbus->hdev->device,
"%s() failed: %#llx", __func__, res);
}
+
+static void hv_arch_irq_unmask(struct irq_data *data)
+{
+ if (hv_root_partition())
+ /*
+ * In case of the nested root partition, the nested hypervisor
+ * is taking care of interrupt remapping and thus the
+ * MAP_DEVICE_INTERRUPT hypercall is required instead of
+ * RETARGET_INTERRUPT.
+ */
+ (void)hv_map_msi_interrupt(data, NULL);
+ else
+ hv_irq_retarget_interrupt(data);
+}
#elif defined(CONFIG_ARM64)
/*
* SPI vectors to use for vPCI; arch SPIs range is [32, 1019], but leaving a bit
@@ -739,8 +736,7 @@ exit_unlock:
#define HV_PCI_MSI_SPI_START 64
#define HV_PCI_MSI_SPI_NR (1020 - HV_PCI_MSI_SPI_START)
#define DELIVERY_MODE 0
-#define FLOW_HANDLER NULL
-#define FLOW_NAME NULL
+#define HV_MSI_CHIP_FLAGS MSI_CHIP_FLAG_SET_EOI
#define hv_msi_prepare NULL
struct hv_pci_chip_data {
@@ -832,9 +828,17 @@ static int hv_pci_vec_irq_gic_domain_alloc(struct irq_domain *domain,
int ret;
fwspec.fwnode = domain->parent->fwnode;
- fwspec.param_count = 2;
- fwspec.param[0] = hwirq;
- fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
+ if (is_of_node(fwspec.fwnode)) {
+ /* SPI lines for OF translations start at offset 32 */
+ fwspec.param_count = 3;
+ fwspec.param[0] = 0;
+ fwspec.param[1] = hwirq - 32;
+ fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
+ } else {
+ fwspec.param_count = 2;
+ fwspec.param[0] = hwirq;
+ fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
+ }
ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
if (ret)
@@ -902,10 +906,44 @@ static const struct irq_domain_ops hv_pci_domain_ops = {
.activate = hv_pci_vec_irq_domain_activate,
};
+#ifdef CONFIG_OF
+
+static struct irq_domain *hv_pci_of_irq_domain_parent(void)
+{
+ struct device_node *parent;
+ struct irq_domain *domain;
+
+ parent = of_irq_find_parent(hv_get_vmbus_root_device()->of_node);
+ if (!parent)
+ return NULL;
+ domain = irq_find_host(parent);
+ of_node_put(parent);
+
+ return domain;
+}
+
+#endif
+
+#ifdef CONFIG_ACPI
+
+static struct irq_domain *hv_pci_acpi_irq_domain_parent(void)
+{
+ acpi_gsi_domain_disp_fn gsi_domain_disp_fn;
+
+ gsi_domain_disp_fn = acpi_get_gsi_dispatcher();
+ if (!gsi_domain_disp_fn)
+ return NULL;
+ return irq_find_matching_fwnode(gsi_domain_disp_fn(0),
+ DOMAIN_BUS_ANY);
+}
+
+#endif
+
static int hv_pci_irqchip_init(void)
{
static struct hv_pci_chip_data *chip_data;
struct fwnode_handle *fn = NULL;
+ struct irq_domain *irq_domain_parent = NULL;
int ret = -ENOMEM;
chip_data = kzalloc(sizeof(*chip_data), GFP_KERNEL);
@@ -922,9 +960,24 @@ static int hv_pci_irqchip_init(void)
* way to ensure that all the corresponding devices are also gone and
* no interrupts will be generated.
*/
- hv_msi_gic_irq_domain = acpi_irq_create_hierarchy(0, HV_PCI_MSI_SPI_NR,
- fn, &hv_pci_domain_ops,
- chip_data);
+#ifdef CONFIG_ACPI
+ if (!acpi_disabled)
+ irq_domain_parent = hv_pci_acpi_irq_domain_parent();
+#endif
+#ifdef CONFIG_OF
+ if (!irq_domain_parent)
+ irq_domain_parent = hv_pci_of_irq_domain_parent();
+#endif
+ if (!irq_domain_parent) {
+ WARN_ONCE(1, "Invalid firmware configuration for VMBus interrupts\n");
+ ret = -EINVAL;
+ goto free_chip;
+ }
+
+ hv_msi_gic_irq_domain = irq_domain_create_hierarchy(irq_domain_parent, 0,
+ HV_PCI_MSI_SPI_NR,
+ fn, &hv_pci_domain_ops,
+ chip_data);
if (!hv_msi_gic_irq_domain) {
pr_err("Failed to create Hyper-V arm64 vPCI MSI IRQ domain\n");
@@ -1041,6 +1094,70 @@ static int wslot_to_devfn(u32 wslot)
return PCI_DEVFN(slot_no.bits.dev, slot_no.bits.func);
}
+static void hv_pci_read_mmio(struct device *dev, phys_addr_t gpa, int size, u32 *val)
+{
+ struct hv_mmio_read_input *in;
+ struct hv_mmio_read_output *out;
+ u64 ret;
+
+ /*
+ * Must be called with interrupts disabled so it is safe
+ * to use the per-cpu input argument page. Use it for
+ * both input and output.
+ */
+ in = *this_cpu_ptr(hyperv_pcpu_input_arg);
+ out = *this_cpu_ptr(hyperv_pcpu_input_arg) + sizeof(*in);
+ in->gpa = gpa;
+ in->size = size;
+
+ ret = hv_do_hypercall(HVCALL_MMIO_READ, in, out);
+ if (hv_result_success(ret)) {
+ switch (size) {
+ case 1:
+ *val = *(u8 *)(out->data);
+ break;
+ case 2:
+ *val = *(u16 *)(out->data);
+ break;
+ default:
+ *val = *(u32 *)(out->data);
+ break;
+ }
+ } else
+ dev_err(dev, "MMIO read hypercall error %llx addr %llx size %d\n",
+ ret, gpa, size);
+}
+
+static void hv_pci_write_mmio(struct device *dev, phys_addr_t gpa, int size, u32 val)
+{
+ struct hv_mmio_write_input *in;
+ u64 ret;
+
+ /*
+ * Must be called with interrupts disabled so it is safe
+ * to use the per-cpu input argument memory.
+ */
+ in = *this_cpu_ptr(hyperv_pcpu_input_arg);
+ in->gpa = gpa;
+ in->size = size;
+ switch (size) {
+ case 1:
+ *(u8 *)(in->data) = val;
+ break;
+ case 2:
+ *(u16 *)(in->data) = val;
+ break;
+ default:
+ *(u32 *)(in->data) = val;
+ break;
+ }
+
+ ret = hv_do_hypercall(HVCALL_MMIO_WRITE, in, NULL);
+ if (!hv_result_success(ret))
+ dev_err(dev, "MMIO write hypercall error %llx addr %llx size %d\n",
+ ret, gpa, size);
+}
+
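
Both helpers require the caller to keep interrupts off so the per-CPU hyperv_pcpu_input_arg page cannot be reused underneath them; the config accessors below satisfy that by doing the select-then-access sequence inside a spin_lock_irqsave() section. The resulting call pattern, as used later in this patch:

	spin_lock_irqsave(&hbus->config_lock, flags);
	/* Select the function, then access its config space */
	hv_pci_write_mmio(dev, hbus->mem_config->start, 4,
			  hpdev->desc.win_slot.slot);
	hv_pci_read_mmio(dev, hbus->mem_config->start + offset, size, &val);
	spin_unlock_irqrestore(&hbus->config_lock, flags);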
/*
* PCI Configuration Space for these root PCI buses is implemented as a pair
* of pages in memory-mapped I/O space. Writing to the first page chooses
@@ -1059,8 +1176,10 @@ static int wslot_to_devfn(u32 wslot)
static void _hv_pcifront_read_config(struct hv_pci_dev *hpdev, int where,
int size, u32 *val)
{
+ struct hv_pcibus_device *hbus = hpdev->hbus;
+ struct device *dev = &hbus->hdev->device;
+ int offset = where + CFG_PAGE_OFFSET;
unsigned long flags;
- void __iomem *addr = hpdev->hbus->cfg_addr + CFG_PAGE_OFFSET + where;
/*
* If the attempt is to read the IDs or the ROM BAR, simulate that.
@@ -1079,8 +1198,8 @@ static void _hv_pcifront_read_config(struct hv_pci_dev *hpdev, int where,
PCI_CAPABILITY_LIST) {
/* ROM BARs are unimplemented */
*val = 0;
- } else if (where >= PCI_INTERRUPT_LINE && where + size <=
- PCI_INTERRUPT_PIN) {
+ } else if ((where >= PCI_INTERRUPT_LINE && where + size <= PCI_INTERRUPT_PIN) ||
+ (where >= PCI_INTERRUPT_PIN && where + size <= PCI_MIN_GNT)) {
/*
* Interrupt Line and Interrupt PIN are hard-wired to zero
* because this front-end only supports message-signaled
@@ -1088,56 +1207,79 @@ static void _hv_pcifront_read_config(struct hv_pci_dev *hpdev, int where,
*/
*val = 0;
} else if (where + size <= CFG_PAGE_SIZE) {
- spin_lock_irqsave(&hpdev->hbus->config_lock, flags);
- /* Choose the function to be read. (See comment above) */
- writel(hpdev->desc.win_slot.slot, hpdev->hbus->cfg_addr);
- /* Make sure the function was chosen before we start reading. */
- mb();
- /* Read from that function's config space. */
- switch (size) {
- case 1:
- *val = readb(addr);
- break;
- case 2:
- *val = readw(addr);
- break;
- default:
- *val = readl(addr);
- break;
+
+ spin_lock_irqsave(&hbus->config_lock, flags);
+ if (hbus->use_calls) {
+ phys_addr_t addr = hbus->mem_config->start + offset;
+
+ hv_pci_write_mmio(dev, hbus->mem_config->start, 4,
+ hpdev->desc.win_slot.slot);
+ hv_pci_read_mmio(dev, addr, size, val);
+ } else {
+ void __iomem *addr = hbus->cfg_addr + offset;
+
+ /* Choose the function to be read. (See comment above) */
+ writel(hpdev->desc.win_slot.slot, hbus->cfg_addr);
+ /* Make sure the function was chosen before reading. */
+ mb();
+ /* Read from that function's config space. */
+ switch (size) {
+ case 1:
+ *val = readb(addr);
+ break;
+ case 2:
+ *val = readw(addr);
+ break;
+ default:
+ *val = readl(addr);
+ break;
+ }
+ /*
+ * Make sure the read was done before we release the
+ * spinlock allowing consecutive reads/writes.
+ */
+ mb();
}
- /*
- * Make sure the read was done before we release the spinlock
- * allowing consecutive reads/writes.
- */
- mb();
- spin_unlock_irqrestore(&hpdev->hbus->config_lock, flags);
+ spin_unlock_irqrestore(&hbus->config_lock, flags);
} else {
- dev_err(&hpdev->hbus->hdev->device,
- "Attempt to read beyond a function's config space.\n");
+ dev_err(dev, "Attempt to read beyond a function's config space.\n");
}
}
static u16 hv_pcifront_get_vendor_id(struct hv_pci_dev *hpdev)
{
+ struct hv_pcibus_device *hbus = hpdev->hbus;
+ struct device *dev = &hbus->hdev->device;
+ u32 val;
u16 ret;
unsigned long flags;
- void __iomem *addr = hpdev->hbus->cfg_addr + CFG_PAGE_OFFSET +
- PCI_VENDOR_ID;
- spin_lock_irqsave(&hpdev->hbus->config_lock, flags);
+ spin_lock_irqsave(&hbus->config_lock, flags);
- /* Choose the function to be read. (See comment above) */
- writel(hpdev->desc.win_slot.slot, hpdev->hbus->cfg_addr);
- /* Make sure the function was chosen before we start reading. */
- mb();
- /* Read from that function's config space. */
- ret = readw(addr);
- /*
- * mb() is not required here, because the spin_unlock_irqrestore()
- * is a barrier.
- */
+ if (hbus->use_calls) {
+ phys_addr_t addr = hbus->mem_config->start +
+ CFG_PAGE_OFFSET + PCI_VENDOR_ID;
- spin_unlock_irqrestore(&hpdev->hbus->config_lock, flags);
+ hv_pci_write_mmio(dev, hbus->mem_config->start, 4,
+ hpdev->desc.win_slot.slot);
+ hv_pci_read_mmio(dev, addr, 2, &val);
+ ret = val; /* Truncates to 16 bits */
+ } else {
+ void __iomem *addr = hbus->cfg_addr + CFG_PAGE_OFFSET +
+ PCI_VENDOR_ID;
+ /* Choose the function to be read. (See comment above) */
+ writel(hpdev->desc.win_slot.slot, hbus->cfg_addr);
+ /* Make sure the function was chosen before we start reading. */
+ mb();
+ /* Read from that function's config space. */
+ ret = readw(addr);
+ /*
+ * mb() is not required here, because the
+ * spin_unlock_irqrestore() is a barrier.
+ */
+ }
+
+ spin_unlock_irqrestore(&hbus->config_lock, flags);
return ret;
}
@@ -1152,39 +1294,51 @@ static u16 hv_pcifront_get_vendor_id(struct hv_pci_dev *hpdev)
static void _hv_pcifront_write_config(struct hv_pci_dev *hpdev, int where,
int size, u32 val)
{
+ struct hv_pcibus_device *hbus = hpdev->hbus;
+ struct device *dev = &hbus->hdev->device;
+ int offset = where + CFG_PAGE_OFFSET;
unsigned long flags;
- void __iomem *addr = hpdev->hbus->cfg_addr + CFG_PAGE_OFFSET + where;
if (where >= PCI_SUBSYSTEM_VENDOR_ID &&
where + size <= PCI_CAPABILITY_LIST) {
/* SSIDs and ROM BARs are read-only */
} else if (where >= PCI_COMMAND && where + size <= CFG_PAGE_SIZE) {
- spin_lock_irqsave(&hpdev->hbus->config_lock, flags);
- /* Choose the function to be written. (See comment above) */
- writel(hpdev->desc.win_slot.slot, hpdev->hbus->cfg_addr);
- /* Make sure the function was chosen before we start writing. */
- wmb();
- /* Write to that function's config space. */
- switch (size) {
- case 1:
- writeb(val, addr);
- break;
- case 2:
- writew(val, addr);
- break;
- default:
- writel(val, addr);
- break;
+ spin_lock_irqsave(&hbus->config_lock, flags);
+
+ if (hbus->use_calls) {
+ phys_addr_t addr = hbus->mem_config->start + offset;
+
+ hv_pci_write_mmio(dev, hbus->mem_config->start, 4,
+ hpdev->desc.win_slot.slot);
+ hv_pci_write_mmio(dev, addr, size, val);
+ } else {
+ void __iomem *addr = hbus->cfg_addr + offset;
+
+ /* Choose the function to write. (See comment above) */
+ writel(hpdev->desc.win_slot.slot, hbus->cfg_addr);
+ /* Make sure the function was chosen before writing. */
+ wmb();
+ /* Write to that function's config space. */
+ switch (size) {
+ case 1:
+ writeb(val, addr);
+ break;
+ case 2:
+ writew(val, addr);
+ break;
+ default:
+ writel(val, addr);
+ break;
+ }
+ /*
+ * Make sure the write was done before we release the
+ * spinlock allowing consecutive reads/writes.
+ */
+ mb();
}
- /*
- * Make sure the write was done before we release the spinlock
- * allowing consecutive reads/writes.
- */
- mb();
- spin_unlock_irqrestore(&hpdev->hbus->config_lock, flags);
+ spin_unlock_irqrestore(&hbus->config_lock, flags);
} else {
- dev_err(&hpdev->hbus->hdev->device,
- "Attempt to write beyond a function's config space.\n");
+ dev_err(dev, "Attempt to write beyond a function's config space.\n");
}
}
@@ -1270,7 +1424,7 @@ static struct pci_ops hv_pcifront_ops = {
*
* If the PF driver wishes to initiate communication, it can "invalidate" one or
* more of the first 64 blocks. This invalidation is delivered via a callback
- * supplied by the VF driver by this driver.
+ * supplied to the VF driver by this driver.
*
* No protocol is implied, except that supplied by the PF and VF drivers.
*/
@@ -1352,7 +1506,7 @@ static int hv_read_config_block(struct pci_dev *pdev, void *buf,
memset(&pkt, 0, sizeof(pkt));
pkt.pkt.completion_func = hv_pci_read_config_compl;
pkt.pkt.compl_ctxt = &comp_pkt;
- read_blk = (struct pci_read_block *)&pkt.pkt.message;
+ read_blk = (struct pci_read_block *)pkt.buf;
read_blk->message_type.type = PCI_READ_BLOCK;
read_blk->wslot.slot = devfn_to_wslot(pdev->devfn);
read_blk->block_id = block_id;
@@ -1432,7 +1586,7 @@ static int hv_write_config_block(struct pci_dev *pdev, void *buf,
memset(&pkt, 0, sizeof(pkt));
pkt.pkt.completion_func = hv_pci_write_config_compl;
pkt.pkt.compl_ctxt = &comp_pkt;
- write_blk = (struct pci_write_block *)&pkt.pkt.message;
+ write_blk = (struct pci_write_block *)pkt.buf;
write_blk->message_type.type = PCI_WRITE_BLOCK;
write_blk->wslot.slot = devfn_to_wslot(pdev->devfn);
write_blk->block_id = block_id;
@@ -1513,7 +1667,7 @@ static void hv_int_desc_free(struct hv_pci_dev *hpdev,
return;
}
memset(&ctxt, 0, sizeof(ctxt));
- int_pkt = (struct pci_delete_interrupt *)&ctxt.pkt.message;
+ int_pkt = (struct pci_delete_interrupt *)ctxt.buffer;
int_pkt->message_type.type =
PCI_DELETE_INTERRUPT_MESSAGE;
int_pkt->wslot.slot = hpdev->desc.win_slot.slot;
@@ -1526,7 +1680,6 @@ static void hv_int_desc_free(struct hv_pci_dev *hpdev,
/**
* hv_msi_free() - Free the MSI.
* @domain: The interrupt domain pointer
- * @info: Extra MSI-related context
* @irq: Identifies the IRQ.
*
* The Hyper-V parent partition and hypervisor are tracking the
@@ -1534,8 +1687,7 @@ static void hv_int_desc_free(struct hv_pci_dev *hpdev,
* table up to date. This callback sends a message that frees
* the IRT entry and related tracking nonsense.
*/
-static void hv_msi_free(struct irq_domain *domain, struct msi_domain_info *info,
- unsigned int irq)
+static void hv_msi_free(struct irq_domain *domain, unsigned int irq)
{
struct hv_pcibus_device *hbus;
struct hv_pci_dev *hpdev;
@@ -1545,7 +1697,7 @@ static void hv_msi_free(struct irq_domain *domain, struct msi_domain_info *info,
struct msi_desc *msi = irq_data_get_msi_desc(irq_data);
pdev = msi_desc_to_pci_dev(msi);
- hbus = info->data;
+ hbus = domain->host_data;
int_desc = irq_data_get_irq_chip_data(irq_data);
if (!int_desc)
return;
@@ -1563,7 +1715,6 @@ static void hv_msi_free(struct irq_domain *domain, struct msi_domain_info *info,
static void hv_irq_mask(struct irq_data *data)
{
- pci_msi_mask_irq(data);
if (data->parent_data->chip->irq_mask)
irq_chip_mask_parent(data);
}
@@ -1574,7 +1725,6 @@ static void hv_irq_unmask(struct irq_data *data)
if (data->parent_data->chip->irq_unmask)
irq_chip_unmask_parent(data);
- pci_msi_unmask_irq(data);
}
struct compose_comp_ctxt {
@@ -1671,8 +1821,7 @@ static int hv_compose_multi_msi_req_get_cpu(void)
spin_lock_irqsave(&multi_msi_cpu_lock, flags);
- cpu_next = cpumask_next_wrap(cpu_next, cpu_online_mask, nr_cpu_ids,
- false);
+ cpu_next = cpumask_next_wrap(cpu_next, cpu_online_mask);
cpu = cpu_next;
spin_unlock_irqrestore(&multi_msi_cpu_lock, flags);
@@ -1911,12 +2060,6 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
hv_pci_onchannelcallback(hbus);
spin_unlock_irqrestore(&channel->sched_lock, flags);
- if (hpdev->state == hv_pcichild_ejecting) {
- dev_err_once(&hbus->hdev->device,
- "the device is being ejected\n");
- goto enable_tasklet;
- }
-
udelay(100);
}
@@ -1966,23 +2109,85 @@ return_null_message:
msg->data = 0;
}
+static bool hv_pcie_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
+ struct irq_domain *real_parent, struct msi_domain_info *info)
+{
+ struct irq_chip *chip = info->chip;
+
+ if (!msi_lib_init_dev_msi_info(dev, domain, real_parent, info))
+ return false;
+
+ info->ops->msi_prepare = hv_msi_prepare;
+
+ chip->irq_set_affinity = irq_chip_set_affinity_parent;
+
+ if (IS_ENABLED(CONFIG_X86))
+ chip->flags |= IRQCHIP_MOVE_DEFERRED;
+
+ return true;
+}
+
+#define HV_PCIE_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_PCI_MSI_MASK_PARENT)
+#define HV_PCIE_MSI_FLAGS_SUPPORTED (MSI_FLAG_MULTI_PCI_MSI | \
+ MSI_FLAG_PCI_MSIX | \
+ MSI_FLAG_PCI_MSIX_ALLOC_DYN | \
+ MSI_GENERIC_FLAGS_MASK)
+
+static const struct msi_parent_ops hv_pcie_msi_parent_ops = {
+ .required_flags = HV_PCIE_MSI_FLAGS_REQUIRED,
+ .supported_flags = HV_PCIE_MSI_FLAGS_SUPPORTED,
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .chip_flags = HV_MSI_CHIP_FLAGS,
+ .prefix = "HV-",
+ .init_dev_msi_info = hv_pcie_init_dev_msi_info,
+};
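
init_dev_msi_info() is the customization hook of the msi_parent_ops scheme: a driver chains to msi_lib_init_dev_msi_info() for the library defaults and then patches the resulting msi_domain_info, which is how the old static hv_msi_prepare/FLOW_HANDLER wiring survives the conversion. The general shape (foo_* illustrative):

static bool foo_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
				  struct irq_domain *real_parent,
				  struct msi_domain_info *info)
{
	if (!msi_lib_init_dev_msi_info(dev, domain, real_parent, info))
		return false;

	info->ops->msi_prepare = foo_msi_prepare;	/* arch-specific hook */
	return true;
}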
+
/* HW Interrupt Chip Descriptor */
static struct irq_chip hv_msi_irq_chip = {
.name = "Hyper-V PCIe MSI",
.irq_compose_msi_msg = hv_compose_msi_msg,
.irq_set_affinity = irq_chip_set_affinity_parent,
-#ifdef CONFIG_X86
.irq_ack = irq_chip_ack_parent,
-#elif defined(CONFIG_ARM64)
.irq_eoi = irq_chip_eoi_parent,
-#endif
.irq_mask = hv_irq_mask,
.irq_unmask = hv_irq_unmask,
};
-static struct msi_domain_ops hv_msi_ops = {
- .msi_prepare = hv_msi_prepare,
- .msi_free = hv_msi_free,
+static int hv_pcie_domain_alloc(struct irq_domain *d, unsigned int virq, unsigned int nr_irqs,
+ void *arg)
+{
+ /*
+ * TODO: Allocating and populating struct tran_int_desc in hv_compose_msi_msg()
+ * should be moved here.
+ */
+ int ret;
+
+ ret = irq_domain_alloc_irqs_parent(d, virq, nr_irqs, arg);
+ if (ret < 0)
+ return ret;
+
+ for (int i = 0; i < nr_irqs; i++) {
+ irq_domain_set_hwirq_and_chip(d, virq + i, 0, &hv_msi_irq_chip, NULL);
+ if (IS_ENABLED(CONFIG_X86))
+ __irq_set_handler(virq + i, handle_edge_irq, 0, "edge");
+ }
+
+ return 0;
+}
+
+static void hv_pcie_domain_free(struct irq_domain *d, unsigned int virq, unsigned int nr_irqs)
+{
+ for (int i = 0; i < nr_irqs; i++)
+ hv_msi_free(d, virq + i);
+
+ irq_domain_free_irqs_top(d, virq, nr_irqs);
+}
+
+static const struct irq_domain_ops hv_pcie_domain_ops = {
+ .alloc = hv_pcie_domain_alloc,
+ .free = hv_pcie_domain_free,
};
/**
@@ -2000,17 +2205,14 @@ static struct msi_domain_ops hv_msi_ops = {
*/
static int hv_pcie_init_irq_domain(struct hv_pcibus_device *hbus)
{
- hbus->msi_info.chip = &hv_msi_irq_chip;
- hbus->msi_info.ops = &hv_msi_ops;
- hbus->msi_info.flags = (MSI_FLAG_USE_DEF_DOM_OPS |
- MSI_FLAG_USE_DEF_CHIP_OPS | MSI_FLAG_MULTI_PCI_MSI |
- MSI_FLAG_PCI_MSIX);
- hbus->msi_info.handler = FLOW_HANDLER;
- hbus->msi_info.handler_name = FLOW_NAME;
- hbus->msi_info.data = hbus;
- hbus->irq_domain = pci_msi_create_irq_domain(hbus->fwnode,
- &hbus->msi_info,
- hv_pci_get_root_domain());
+ struct irq_domain_info info = {
+ .fwnode = hbus->fwnode,
+ .ops = &hv_pcie_domain_ops,
+ .host_data = hbus,
+ .parent = hv_pci_get_root_domain(),
+ };
+
+ hbus->irq_domain = msi_create_parent_irq_domain(&info, &hv_pcie_msi_parent_ops);
if (!hbus->irq_domain) {
dev_err(&hbus->hdev->device,
"Failed to build an MSI IRQ domain\n");
@@ -2402,7 +2604,7 @@ static struct hv_pci_dev *new_pcichild_device(struct hv_pcibus_device *hbus,
comp_pkt.hpdev = hpdev;
pkt.init_packet.compl_ctxt = &comp_pkt;
pkt.init_packet.completion_func = q_resource_requirements;
- res_req = (struct pci_child_message *)&pkt.init_packet.message;
+ res_req = (struct pci_child_message *)pkt.buffer;
res_req->message_type.type = PCI_QUERY_RESOURCE_REQUIREMENTS;
res_req->wslot.slot = desc->win_slot.slot;
@@ -2522,6 +2724,8 @@ static void pci_devices_present_work(struct work_struct *work)
if (!dr)
return;
+ mutex_lock(&hbus->state_lock);
+
/* First, mark all existing children as reported missing. */
spin_lock_irqsave(&hbus->device_list_lock, flags);
list_for_each_entry(hpdev, &hbus->children, list_entry) {
@@ -2603,6 +2807,8 @@ static void pci_devices_present_work(struct work_struct *work)
break;
}
+ mutex_unlock(&hbus->state_lock);
+
kfree(dr);
}
@@ -2751,7 +2957,7 @@ static void hv_eject_device_work(struct work_struct *work)
hpdev = container_of(work, struct hv_pci_dev, wrk);
hbus = hpdev->hbus;
- WARN_ON(hpdev->state != hv_pcichild_ejecting);
+ mutex_lock(&hbus->state_lock);
/*
* Ejection can come before or after the PCI bus has been set up, so
@@ -2776,7 +2982,7 @@ static void hv_eject_device_work(struct work_struct *work)
pci_destroy_slot(hpdev->pci_slot);
memset(&ctxt, 0, sizeof(ctxt));
- ejct_pkt = (struct pci_eject_response *)&ctxt.pkt.message;
+ ejct_pkt = (struct pci_eject_response *)ctxt.buffer;
ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE;
ejct_pkt->wslot.slot = hpdev->desc.win_slot.slot;
vmbus_sendpacket(hbus->hdev->channel, ejct_pkt,
@@ -2789,6 +2995,8 @@ static void hv_eject_device_work(struct work_struct *work)
put_pcichild(hpdev);
put_pcichild(hpdev);
/* hpdev has been freed. Do not use it any more. */
+
+ mutex_unlock(&hbus->state_lock);
}
/**
@@ -2809,7 +3017,6 @@ static void hv_pci_eject_device(struct hv_pci_dev *hpdev)
return;
}
- hpdev->state = hv_pcichild_ejecting;
get_pcichild(hpdev);
INIT_WORK(&hpdev->wrk, hv_eject_device_work);
queue_work(hbus->wq, &hpdev->wrk);
@@ -3033,7 +3240,7 @@ static int hv_pci_protocol_negotiation(struct hv_device *hdev,
init_completion(&comp_pkt.host_event);
pkt->completion_func = hv_pci_generic_compl;
pkt->compl_ctxt = &comp_pkt;
- version_req = (struct pci_version_request *)&pkt->message;
+ version_req = (struct pci_version_request *)(pkt + 1);
version_req->message_type.type = PCI_QUERY_PROTOCOL_VERSION;
for (i = 0; i < num_version; i++) {
@@ -3238,8 +3445,10 @@ static int hv_pci_enter_d0(struct hv_device *hdev)
struct pci_bus_d0_entry *d0_entry;
struct hv_pci_compl comp_pkt;
struct pci_packet *pkt;
+ bool retry = true;
int ret;
+enter_d0_retry:
/*
* Tell the host that the bus is ready to use, and moved into the
* powered-on state. This includes telling the host which region
@@ -3253,7 +3462,7 @@ static int hv_pci_enter_d0(struct hv_device *hdev)
init_completion(&comp_pkt.host_event);
pkt->completion_func = hv_pci_generic_compl;
pkt->compl_ctxt = &comp_pkt;
- d0_entry = (struct pci_bus_d0_entry *)&pkt->message;
+ d0_entry = (struct pci_bus_d0_entry *)(pkt + 1);
d0_entry->message_type.type = PCI_BUS_D0ENTRY;
d0_entry->mmio_base = hbus->mem_config->start;
@@ -3266,6 +3475,38 @@ static int hv_pci_enter_d0(struct hv_device *hdev)
if (ret)
goto exit;
+ /*
+	 * In certain cases (kdump) the PCI device of interest was not
+	 * cleanly shut down and its resources are still held on the host
+	 * side, so the host can return an invalid device status.
+	 * We need to explicitly ask the host to release the resources
+	 * and try to enter D0 again.
+ */
+ if (comp_pkt.completion_status < 0 && retry) {
+ retry = false;
+
+ dev_err(&hdev->device, "Retrying D0 Entry\n");
+
+ /*
+		 * hv_pci_bus_exit() calls hv_send_resources_released()
+		 * to free up the resources of its child devices.
+		 * In the kdump kernel we need to set
+		 * wslot_res_allocated to 255 so it scans all child
+		 * devices to release resources allocated in the
+		 * normal kernel before the panic happened.
+ */
+ hbus->wslot_res_allocated = 255;
+
+ ret = hv_pci_bus_exit(hdev, true);
+
+ if (ret == 0) {
+ kfree(pkt);
+ goto enter_d0_retry;
+ }
+ dev_err(&hdev->device,
+ "Retrying D0 failed with ret %d\n", ret);
+ }
+
if (comp_pkt.completion_status < 0) {
dev_err(&hdev->device,
"PCI Pass-through VSP failed D0 Entry with status %x\n",
@@ -3308,6 +3549,24 @@ static int hv_pci_query_relations(struct hv_device *hdev)
if (!ret)
ret = wait_for_response(hdev, &comp);
+ /*
+ * In the case of fast device addition/removal, it's possible that
+ * vmbus_sendpacket() or wait_for_response() returns -ENODEV but we
+ * already got a PCI_BUS_RELATIONS* message from the host and the
+ * channel callback already scheduled a work to hbus->wq, which can be
+ * running pci_devices_present_work() -> survey_child_resources() ->
+ * complete(&hbus->survey_event), even after hv_pci_query_relations()
+ * exits and the stack variable 'comp' is no longer valid; as a result,
+ * a hang or a page fault may happen when the complete() calls
+ * raw_spin_lock_irqsave(). Flush hbus->wq before we exit from
+ * hv_pci_query_relations() to avoid the issues. Note: if 'ret' is
+ * -ENODEV, there can't be any more work item scheduled to hbus->wq
+ * after the flush_workqueue(): see vmbus_onoffer_rescind() ->
+ * vmbus_reset_channel_cb(), vmbus_rescind_cleanup() ->
+ * channel->rescind = true.
+ */
+ flush_workqueue(hbus->wq);
+
return ret;
}
@@ -3361,20 +3620,20 @@ static int hv_send_resources_allocated(struct hv_device *hdev)
if (hbus->protocol_version < PCI_PROTOCOL_VERSION_1_2) {
res_assigned =
- (struct pci_resources_assigned *)&pkt->message;
+ (struct pci_resources_assigned *)(pkt + 1);
res_assigned->message_type.type =
PCI_RESOURCES_ASSIGNED;
res_assigned->wslot.slot = hpdev->desc.win_slot.slot;
} else {
res_assigned2 =
- (struct pci_resources_assigned2 *)&pkt->message;
+ (struct pci_resources_assigned2 *)(pkt + 1);
res_assigned2->message_type.type =
PCI_RESOURCES_ASSIGNED2;
res_assigned2->wslot.slot = hpdev->desc.win_slot.slot;
}
put_pcichild(hpdev);
- ret = vmbus_sendpacket(hdev->channel, &pkt->message,
+ ret = vmbus_sendpacket(hdev->channel, pkt + 1,
size_res, (unsigned long)pkt,
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
@@ -3437,48 +3696,6 @@ static int hv_send_resources_released(struct hv_device *hdev)
return 0;
}
-#define HVPCI_DOM_MAP_SIZE (64 * 1024)
-static DECLARE_BITMAP(hvpci_dom_map, HVPCI_DOM_MAP_SIZE);
-
-/*
- * PCI domain number 0 is used by emulated devices on Gen1 VMs, so define 0
- * as invalid for passthrough PCI devices of this driver.
- */
-#define HVPCI_DOM_INVALID 0
-
-/**
- * hv_get_dom_num() - Get a valid PCI domain number
- * Check if the PCI domain number is in use, and return another number if
- * it is in use.
- *
- * @dom: Requested domain number
- *
- * return: domain number on success, HVPCI_DOM_INVALID on failure
- */
-static u16 hv_get_dom_num(u16 dom)
-{
- unsigned int i;
-
- if (test_and_set_bit(dom, hvpci_dom_map) == 0)
- return dom;
-
- for_each_clear_bit(i, hvpci_dom_map, HVPCI_DOM_MAP_SIZE) {
- if (test_and_set_bit(i, hvpci_dom_map) == 0)
- return i;
- }
-
- return HVPCI_DOM_INVALID;
-}
-
-/**
- * hv_put_dom_num() - Mark the PCI domain number as free
- * @dom: Domain number to be freed
- */
-static void hv_put_dom_num(u16 dom)
-{
- clear_bit(dom, hvpci_dom_map);
-}
-
/**
* hv_pci_probe() - New VMBus channel probe, for a root PCI bus
* @hdev: VMBus's tracking struct for this root PCI bus
@@ -3491,44 +3708,20 @@ static int hv_pci_probe(struct hv_device *hdev,
{
struct pci_host_bridge *bridge;
struct hv_pcibus_device *hbus;
- u16 dom_req, dom;
+ int ret, dom;
+ u16 dom_req;
char *name;
- bool enter_d0_retry = true;
- int ret;
-
- /*
- * hv_pcibus_device contains the hypercall arguments for retargeting in
- * hv_irq_unmask(). Those must not cross a page boundary.
- */
- BUILD_BUG_ON(sizeof(*hbus) > HV_HYP_PAGE_SIZE);
bridge = devm_pci_alloc_host_bridge(&hdev->device, 0);
if (!bridge)
return -ENOMEM;
- /*
- * With the recent 59bb47985c1d ("mm, sl[aou]b: guarantee natural
- * alignment for kmalloc(power-of-two)"), kzalloc() is able to allocate
- * a 4KB buffer that is guaranteed to be 4KB-aligned. Here the size and
- * alignment of hbus is important because hbus's field
- * retarget_msi_interrupt_params must not cross a 4KB page boundary.
- *
- * Here we prefer kzalloc to get_zeroed_page(), because a buffer
- * allocated by the latter is not tracked and scanned by kmemleak, and
- * hence kmemleak reports the pointer contained in the hbus buffer
- * (i.e. the hpdev struct, which is created in new_pcichild_device() and
- * is tracked by hbus->children) as memory leak (false positive).
- *
- * If the kernel doesn't have 59bb47985c1d, get_zeroed_page() *must* be
- * used to allocate the hbus buffer and we can avoid the kmemleak false
- * positive by using kmemleak_alloc() and kmemleak_free() to ask
- * kmemleak to track and scan the hbus buffer.
- */
- hbus = kzalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
+ hbus = kzalloc(sizeof(*hbus), GFP_KERNEL);
if (!hbus)
return -ENOMEM;
hbus->bridge = bridge;
+ mutex_init(&hbus->state_lock);
hbus->state = hv_pcibus_init;
hbus->wslot_res_allocated = -1;
@@ -3544,11 +3737,14 @@ static int hv_pci_probe(struct hv_device *hdev,
* PCI bus (which is actually emulated by the hypervisor) is domain 0.
* (2) There will be no overlap between domains (after fixing possible
* collisions) in the same VM.
+ *
+ * Because Gen1 VMs use domain 0, don't allow picking domain 0 here,
+ * even if bytes 4 and 5 of the instance GUID are both zero. For wider
+ * userspace compatibility, limit the domain ID to a 16-bit value.
*/
dom_req = hdev->dev_instance.b[5] << 8 | hdev->dev_instance.b[4];
- dom = hv_get_dom_num(dom_req);
-
- if (dom == HVPCI_DOM_INVALID) {
+ dom = pci_bus_find_emul_domain_nr(dom_req, 1, U16_MAX);
+ if (dom < 0) {
dev_err(&hdev->device,
"Unable to use dom# 0x%x or other numbers", dom_req);
ret = -EINVAL;
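
pci_bus_find_emul_domain_nr() and pci_bus_release_emul_domain_nr() move the domain-number allocation that hv_get_dom_num()/hv_put_dom_num() kept in a driver-local bitmap into the PCI core, so all emulated buses draw from one pool. Judging from this call site (the exact semantics are inferred, not spelled out in the hunk), the arguments are a preferred domain plus inclusive minimum and maximum bounds, with a negative errno on exhaustion:

	dom = pci_bus_find_emul_domain_nr(dom_req, 1, U16_MAX);
	if (dom < 0)
		return dom;	/* nothing free in [1, U16_MAX] */
	...
	pci_bus_release_emul_domain_nr(dom);	/* on teardown */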
@@ -3563,6 +3759,7 @@ static int hv_pci_probe(struct hv_device *hdev,
hbus->bridge->domain_nr = dom;
#ifdef CONFIG_X86
hbus->sysdata.domain = dom;
+ hbus->use_calls = !!(ms_hyperv.hints & HV_X64_USE_MMIO_HYPERCALLS);
#elif defined(CONFIG_ARM64)
/*
* Set the PCI bus parent to be the corresponding VMbus
@@ -3572,6 +3769,7 @@ static int hv_pci_probe(struct hv_device *hdev,
* information to devices created on the bus.
*/
hbus->sysdata.parent = hdev->device.parent;
+ hbus->use_calls = false;
#endif
hbus->hdev = hdev;
@@ -3579,7 +3777,6 @@ static int hv_pci_probe(struct hv_device *hdev,
INIT_LIST_HEAD(&hbus->dr_list);
spin_lock_init(&hbus->config_lock);
spin_lock_init(&hbus->device_list_lock);
- spin_lock_init(&hbus->retarget_msi_interrupt_lock);
hbus->wq = alloc_ordered_workqueue("hv_pci_%x", 0,
hbus->bridge->domain_nr);
if (!hbus->wq) {
@@ -3633,49 +3830,15 @@ static int hv_pci_probe(struct hv_device *hdev,
if (ret)
goto free_fwnode;
-retry:
ret = hv_pci_query_relations(hdev);
if (ret)
goto free_irq_domain;
- ret = hv_pci_enter_d0(hdev);
- /*
- * In certain case (Kdump) the pci device of interest was
- * not cleanly shut down and resource is still held on host
- * side, the host could return invalid device status.
- * We need to explicitly request host to release the resource
- * and try to enter D0 again.
- * Since the hv_pci_bus_exit() call releases structures
- * of all its child devices, we need to start the retry from
- * hv_pci_query_relations() call, requesting host to send
- * the synchronous child device relations message before this
- * information is needed in hv_send_resources_allocated()
- * call later.
- */
- if (ret == -EPROTO && enter_d0_retry) {
- enter_d0_retry = false;
+ mutex_lock(&hbus->state_lock);
- dev_err(&hdev->device, "Retrying D0 Entry\n");
-
- /*
- * Hv_pci_bus_exit() calls hv_send_resources_released()
- * to free up resources of its child devices.
- * In the kdump kernel we need to set the
- * wslot_res_allocated to 255 so it scans all child
- * devices to release resources allocated in the
- * normal kernel before panic happened.
- */
- hbus->wslot_res_allocated = 255;
- ret = hv_pci_bus_exit(hdev, true);
-
- if (ret == 0)
- goto retry;
-
- dev_err(&hdev->device,
- "Retrying D0 failed with ret %d\n", ret);
- }
+ ret = hv_pci_enter_d0(hdev);
if (ret)
- goto free_irq_domain;
+ goto release_state_lock;
ret = hv_pci_allocate_bridge_windows(hbus);
if (ret)
@@ -3693,12 +3856,15 @@ retry:
if (ret)
goto free_windows;
+ mutex_unlock(&hbus->state_lock);
return 0;
free_windows:
hv_pci_free_bridge_windows(hbus);
exit_d0:
(void) hv_pci_bus_exit(hdev, true);
+release_state_lock:
+ mutex_unlock(&hbus->state_lock);
free_irq_domain:
irq_domain_remove(hbus->irq_domain);
free_fwnode:
@@ -3712,7 +3878,7 @@ close:
destroy_wq:
destroy_workqueue(hbus->wq);
free_dom:
- hv_put_dom_num(hbus->bridge->domain_nr);
+ pci_bus_release_emul_domain_nr(hbus->bridge->domain_nr);
free_bus:
kfree(hbus);
return ret;
@@ -3726,6 +3892,7 @@ static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs)
struct pci_packet teardown_packet;
u8 buffer[sizeof(struct pci_message)];
} pkt;
+ struct pci_message *msg;
struct hv_pci_compl comp_pkt;
struct hv_pci_dev *hpdev, *tmp;
unsigned long flags;
@@ -3771,10 +3938,10 @@ static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs)
init_completion(&comp_pkt.host_event);
pkt.teardown_packet.completion_func = hv_pci_generic_compl;
pkt.teardown_packet.compl_ctxt = &comp_pkt;
- pkt.teardown_packet.message[0].type = PCI_BUS_D0EXIT;
+ msg = (struct pci_message *)pkt.buffer;
+ msg->type = PCI_BUS_D0EXIT;
- ret = vmbus_sendpacket_getid(chan, &pkt.teardown_packet.message,
- sizeof(struct pci_message),
+ ret = vmbus_sendpacket_getid(chan, msg, sizeof(*msg),
(unsigned long)&pkt.teardown_packet,
&trans_id, VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
@@ -3800,13 +3967,10 @@ static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs)
/**
* hv_pci_remove() - Remove routine for this VMBus channel
* @hdev: VMBus's tracking struct for this root PCI bus
- *
- * Return: 0 on success, -errno on failure
*/
-static int hv_pci_remove(struct hv_device *hdev)
+static void hv_pci_remove(struct hv_device *hdev)
{
struct hv_pcibus_device *hbus;
- int ret;
hbus = hv_get_drvdata(hdev);
if (hbus->state == hv_pcibus_installed) {
@@ -3829,7 +3993,7 @@ static int hv_pci_remove(struct hv_device *hdev)
pci_unlock_rescan_remove();
}
- ret = hv_pci_bus_exit(hdev, false);
+ hv_pci_bus_exit(hdev, false);
vmbus_close(hdev->channel);
@@ -3839,10 +4003,7 @@ static int hv_pci_remove(struct hv_device *hdev)
irq_domain_remove(hbus->irq_domain);
irq_domain_free_fwnode(hbus->fwnode);
- hv_put_dom_num(hbus->bridge->domain_nr);
-
kfree(hbus);
- return ret;
}
static int hv_pci_suspend(struct hv_device *hdev)
@@ -3896,21 +4057,18 @@ static int hv_pci_restore_msi_msg(struct pci_dev *pdev, void *arg)
{
struct irq_data *irq_data;
struct msi_desc *entry;
- int ret = 0;
- msi_lock_descs(&pdev->dev);
+ if (!pdev->msi_enabled && !pdev->msix_enabled)
+ return 0;
+
+ guard(msi_descs_lock)(&pdev->dev);
msi_for_each_desc(entry, &pdev->dev, MSI_DESC_ASSOCIATED) {
irq_data = irq_get_irq_data(entry->irq);
- if (WARN_ON_ONCE(!irq_data)) {
- ret = -EINVAL;
- break;
- }
-
+ if (WARN_ON_ONCE(!irq_data))
+ return -EINVAL;
hv_compose_msi_msg(irq_data, &entry->msg);
}
- msi_unlock_descs(&pdev->dev);
-
- return ret;
+ return 0;
}
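A minimal sketch of the guard()-based locking used above, assuming only <linux/cleanup.h> and <linux/msi.h>; example_walk_msi_descs() is hypothetical. The compiler emits the unlock on every return path, so no unlock label or ret variable is needed.

#include <linux/cleanup.h>
#include <linux/msi.h>

static int example_walk_msi_descs(struct device *dev)
{
        struct msi_desc *entry;

        guard(msi_descs_lock)(dev);

        msi_for_each_desc(entry, dev, MSI_DESC_ASSOCIATED) {
                if (WARN_ON_ONCE(!entry->irq))
                        return -EINVAL;    /* lock dropped automatically */
        }
        return 0;                          /* lock dropped automatically */
}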
/*
@@ -3952,20 +4110,26 @@ static int hv_pci_resume(struct hv_device *hdev)
if (ret)
goto out;
+ mutex_lock(&hbus->state_lock);
+
ret = hv_pci_enter_d0(hdev);
if (ret)
- goto out;
+ goto release_state_lock;
ret = hv_send_resources_allocated(hdev);
if (ret)
- goto out;
+ goto release_state_lock;
prepopulate_bars(hbus);
hv_pci_restore_msi_state(hbus);
hbus->state = hv_pcibus_installed;
+ mutex_unlock(&hbus->state_lock);
return 0;
+
+release_state_lock:
+ mutex_unlock(&hbus->state_lock);
out:
vmbus_close(hdev->channel);
return ret;
@@ -4005,13 +4169,13 @@ static int __init init_hv_pci_drv(void)
if (!hv_is_hyperv_initialized())
return -ENODEV;
+ if (hv_root_partition() && !hv_nested)
+ return -ENODEV;
+
ret = hv_pci_irqchip_init();
if (ret)
return ret;
- /* Set the invalid domain number's bit, so it will not be used */
- set_bit(HVPCI_DOM_INVALID, hvpci_dom_map);
-
/* Initialize PCI block r/w interface */
hvpci_block_ops.read_block = hv_read_config_block;
hvpci_block_ops.write_block = hv_write_config_block;
diff --git a/drivers/pci/controller/pci-ixp4xx.c b/drivers/pci/controller/pci-ixp4xx.c
index 654ac4a82beb..9fd401838bad 100644
--- a/drivers/pci/controller/pci-ixp4xx.c
+++ b/drivers/pci/controller/pci-ixp4xx.c
@@ -19,13 +19,13 @@
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/bits.h>
+#include "../pci.h"
/* Register offsets */
#define IXP4XX_PCI_NP_AD 0x00
@@ -188,12 +188,13 @@ static u32 ixp4xx_config_addr(u8 bus_num, u16 devfn, int where)
/* Root bus is always 0 in this hardware */
if (bus_num == 0) {
/* type 0 */
- return BIT(32-PCI_SLOT(devfn)) | ((PCI_FUNC(devfn)) << 8) |
- (where & ~3);
+ return (PCI_CONF1_ADDRESS(0, 0, PCI_FUNC(devfn), where) &
+ ~PCI_CONF1_ENABLE) | BIT(32-PCI_SLOT(devfn));
} else {
/* type 1 */
- return (bus_num << 16) | ((PCI_SLOT(devfn)) << 11) |
- ((PCI_FUNC(devfn)) << 8) | (where & ~3) | 1;
+ return (PCI_CONF1_ADDRESS(bus_num, PCI_SLOT(devfn),
+ PCI_FUNC(devfn), where) &
+ ~PCI_CONF1_ENABLE) | 1;
}
}
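For reference, the layout PCI_CONF1_ADDRESS() produces (as defined in drivers/pci/pci.h): enable in bit 31, bus in bits 23:16, device in bits 15:11, function in bits 10:8, dword-aligned register offset in bits 7:2. A sketch of the type-1 case above, with the enable bit masked off because IXP4xx uses bit 0 as its type-1 indicator instead:

static u32 example_type1_addr(u8 bus, u8 slot, u8 func, int where)
{
        return (PCI_CONF1_ADDRESS(bus, slot, func, where) &
                ~PCI_CONF1_ENABLE) | 1;    /* bit 0: type-1 config cycle */
}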
@@ -213,6 +214,7 @@ static u32 ixp4xx_crp_byte_lane_enable_bits(u32 n, int size)
return 0xffffffff;
}
+#ifdef CONFIG_ARM
static int ixp4xx_crp_read_config(struct ixp4xx_pci *p, int where, int size,
u32 *value)
{
@@ -250,6 +252,7 @@ static int ixp4xx_crp_read_config(struct ixp4xx_pci *p, int where, int size,
return PCIBIOS_SUCCESSFUL;
}
+#endif
static int ixp4xx_crp_write_config(struct ixp4xx_pci *p, int where, int size,
u32 value)
@@ -469,6 +472,7 @@ static int ixp4xx_pci_parse_map_dma_ranges(struct ixp4xx_pci *p)
return 0;
}
+#ifdef CONFIG_ARM
/* Only used to get context for abort handling */
static struct ixp4xx_pci *ixp4xx_pci_abort_singleton;
@@ -508,6 +512,7 @@ static int ixp4xx_pci_abort_handler(unsigned long addr, unsigned int fsr,
return 0;
}
+#endif
static int __init ixp4xx_pci_probe(struct platform_device *pdev)
{
@@ -554,10 +559,12 @@ static int __init ixp4xx_pci_probe(struct platform_device *pdev)
dev_info(dev, "controller is in %s mode\n",
p->host_mode ? "host" : "option");
+#ifdef CONFIG_ARM
/* Hook in our fault handler for PCI errors */
ixp4xx_pci_abort_singleton = p;
hook_fault_code(16+6, ixp4xx_pci_abort_handler, SIGBUS, 0,
"imprecise external abort");
+#endif
ret = ixp4xx_pci_parse_map_ranges(p);
if (ret)
diff --git a/drivers/pci/controller/pci-loongson.c b/drivers/pci/controller/pci-loongson.c
index 05c50408f13b..bc630ab8a283 100644
--- a/drivers/pci/controller/pci-loongson.c
+++ b/drivers/pci/controller/pci-loongson.c
@@ -5,7 +5,7 @@
* Copyright (C) 2020 Jiaxun Yang <jiaxun.yang@flygoat.com>
*/
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
@@ -15,9 +15,14 @@
#include "../pci.h"
/* Device IDs */
-#define DEV_PCIE_PORT_0 0x7a09
-#define DEV_PCIE_PORT_1 0x7a19
-#define DEV_PCIE_PORT_2 0x7a29
+#define DEV_LS2K_PCIE_PORT0 0x1a05
+#define DEV_LS7A_PCIE_PORT0 0x7a09
+#define DEV_LS7A_PCIE_PORT1 0x7a19
+#define DEV_LS7A_PCIE_PORT2 0x7a29
+#define DEV_LS7A_PCIE_PORT3 0x7a39
+#define DEV_LS7A_PCIE_PORT4 0x7a49
+#define DEV_LS7A_PCIE_PORT5 0x7a59
+#define DEV_LS7A_PCIE_PORT6 0x7a69
#define DEV_LS2K_APB 0x7a02
#define DEV_LS7A_GMAC 0x7a03
@@ -53,11 +58,11 @@ static void bridge_class_quirk(struct pci_dev *dev)
dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
- DEV_PCIE_PORT_0, bridge_class_quirk);
+ DEV_LS7A_PCIE_PORT0, bridge_class_quirk);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
- DEV_PCIE_PORT_1, bridge_class_quirk);
+ DEV_LS7A_PCIE_PORT1, bridge_class_quirk);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
- DEV_PCIE_PORT_2, bridge_class_quirk);
+ DEV_LS7A_PCIE_PORT2, bridge_class_quirk);
static void system_bus_quirk(struct pci_dev *pdev)
{
@@ -75,14 +80,27 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
DEV_LS7A_LPC, system_bus_quirk);
-static void loongson_mrrs_quirk(struct pci_dev *dev)
+/*
+ * Some Loongson PCIe ports have a hardware limitation on their Maximum
+ * Read Request Size: they can't handle anything larger than 256 bytes.
+ * Sane firmware will set a proper MRRS at boot, so we only need
+ * no_inc_mrrs for bridges. However, some MIPS Loongson firmware doesn't
+ * set MRRS properly, so we have to enforce the maximum safe MRRS of 256
+ * bytes ourselves.
+ */
+#ifdef CONFIG_MIPS
+static void loongson_set_min_mrrs_quirk(struct pci_dev *pdev)
{
- struct pci_bus *bus = dev->bus;
+ struct pci_bus *bus = pdev->bus;
struct pci_dev *bridge;
static const struct pci_device_id bridge_devids[] = {
- { PCI_VDEVICE(LOONGSON, DEV_PCIE_PORT_0) },
- { PCI_VDEVICE(LOONGSON, DEV_PCIE_PORT_1) },
- { PCI_VDEVICE(LOONGSON, DEV_PCIE_PORT_2) },
+ { PCI_VDEVICE(LOONGSON, DEV_LS2K_PCIE_PORT0) },
+ { PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT0) },
+ { PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT1) },
+ { PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT2) },
+ { PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT3) },
+ { PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT4) },
+ { PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT5) },
+ { PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT6) },
{ 0, },
};
@@ -90,22 +108,41 @@ static void loongson_mrrs_quirk(struct pci_dev *dev)
while (!pci_is_root_bus(bus)) {
bridge = bus->self;
bus = bus->parent;
- /*
- * Some Loongson PCIe ports have a h/w limitation of
- * 256 bytes maximum read request size. They can't handle
- * anything larger than this. So force this limit on
- * any devices attached under these ports.
- */
+
if (pci_match_id(bridge_devids, bridge)) {
- if (pcie_get_readrq(dev) > 256) {
- pci_info(dev, "limiting MRRS to 256\n");
- pcie_set_readrq(dev, 256);
+ if (pcie_get_readrq(pdev) > 256) {
+ pci_info(pdev, "limiting MRRS to 256\n");
+ pcie_set_readrq(pdev, 256);
}
break;
}
}
}
-DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, loongson_mrrs_quirk);
+DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, loongson_set_min_mrrs_quirk);
+#endif
+
+static void loongson_mrrs_quirk(struct pci_dev *pdev)
+{
+ struct pci_host_bridge *bridge = pci_find_host_bridge(pdev->bus);
+
+ bridge->no_inc_mrrs = 1;
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
+ DEV_LS2K_PCIE_PORT0, loongson_mrrs_quirk);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
+ DEV_LS7A_PCIE_PORT0, loongson_mrrs_quirk);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
+ DEV_LS7A_PCIE_PORT1, loongson_mrrs_quirk);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
+ DEV_LS7A_PCIE_PORT2, loongson_mrrs_quirk);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
+ DEV_LS7A_PCIE_PORT3, loongson_mrrs_quirk);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
+ DEV_LS7A_PCIE_PORT4, loongson_mrrs_quirk);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
+ DEV_LS7A_PCIE_PORT5, loongson_mrrs_quirk);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
+ DEV_LS7A_PCIE_PORT6, loongson_mrrs_quirk);
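Wiring the quirk for one more port would follow the same pattern; the device ID below is hypothetical. An EARLY fixup runs before resource assignment, and pcie_set_readrq() refuses to raise MRRS on any bus whose host bridge has no_inc_mrrs set.

#define DEV_EXAMPLE_PCIE_PORT   0x7a79          /* hypothetical device ID */

static void example_mrrs_quirk(struct pci_dev *pdev)
{
        struct pci_host_bridge *bridge = pci_find_host_bridge(pdev->bus);

        bridge->no_inc_mrrs = 1;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
                        DEV_EXAMPLE_PCIE_PORT, example_mrrs_quirk);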
static void loongson_pci_pin_quirk(struct pci_dev *pdev)
{
@@ -126,6 +163,19 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON,
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON,
DEV_LS7A_HDMI, loongson_pci_pin_quirk);
+static void loongson_pci_msi_quirk(struct pci_dev *dev)
+{
+ u16 val, class = dev->class >> 8;
+
+ if (class != PCI_CLASS_BRIDGE_HOST)
+ return;
+
+ pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &val);
+ val |= PCI_MSI_FLAGS_ENABLE;
+ pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, val);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, DEV_LS7A_PCIE_PORT5, loongson_pci_msi_quirk);
+
static struct loongson_pci *pci_bus_to_loongson_pci(struct pci_bus *bus)
{
struct pci_config_window *cfg;
diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c
index 1dc209f6f53a..a72aa57591c0 100644
--- a/drivers/pci/controller/pci-mvebu.c
+++ b/drivers/pci/controller/pci-mvebu.c
@@ -87,7 +87,6 @@ struct mvebu_pcie {
struct resource io;
struct resource realio;
struct resource mem;
- struct resource busn;
int nports;
};
@@ -265,7 +264,7 @@ static void mvebu_pcie_setup_hw(struct mvebu_pcie_port *port)
*/
lnkcap = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCAP);
lnkcap &= ~PCI_EXP_LNKCAP_MLW;
- lnkcap |= (port->is_x4 ? 4 : 1) << 4;
+ lnkcap |= FIELD_PREP(PCI_EXP_LNKCAP_MLW, port->is_x4 ? 4 : 1);
mvebu_writel(port, lnkcap, PCIE_CAP_PCIEXP + PCI_EXP_LNKCAP);
/* Disable Root Bridge I/O space, memory space and bus mastering. */
@@ -1079,9 +1078,9 @@ static int mvebu_pcie_init_irq_domain(struct mvebu_pcie_port *port)
return -ENODEV;
}
- port->intx_irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
- &mvebu_pcie_intx_irq_domain_ops,
- port);
+ port->intx_irq_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node),
+ PCI_NUM_INTX,
+ &mvebu_pcie_intx_irq_domain_ops, port);
of_node_put(pcie_intc_node);
if (!port->intx_irq_domain) {
dev_err(dev, "Failed to get INTx IRQ domain for %s\n", port->name);
@@ -1169,48 +1168,27 @@ static void __iomem *mvebu_pcie_map_registers(struct platform_device *pdev,
return devm_ioremap_resource(&pdev->dev, &port->regs);
}
-#define DT_FLAGS_TO_TYPE(flags) (((flags) >> 24) & 0x03)
-#define DT_TYPE_IO 0x1
-#define DT_TYPE_MEM32 0x2
-#define DT_CPUADDR_TO_TARGET(cpuaddr) (((cpuaddr) >> 56) & 0xFF)
-#define DT_CPUADDR_TO_ATTR(cpuaddr) (((cpuaddr) >> 48) & 0xFF)
-
static int mvebu_get_tgt_attr(struct device_node *np, int devfn,
unsigned long type,
unsigned int *tgt,
unsigned int *attr)
{
- const int na = 3, ns = 2;
- const __be32 *range;
- int rlen, nranges, rangesz, pna, i;
+ struct of_range range;
+ struct of_range_parser parser;
*tgt = -1;
*attr = -1;
- range = of_get_property(np, "ranges", &rlen);
- if (!range)
+ if (of_pci_range_parser_init(&parser, np))
return -EINVAL;
- pna = of_n_addr_cells(np);
- rangesz = pna + na + ns;
- nranges = rlen / sizeof(__be32) / rangesz;
-
- for (i = 0; i < nranges; i++, range += rangesz) {
- u32 flags = of_read_number(range, 1);
- u32 slot = of_read_number(range + 1, 1);
- u64 cpuaddr = of_read_number(range + na, pna);
- unsigned long rtype;
-
- if (DT_FLAGS_TO_TYPE(flags) == DT_TYPE_IO)
- rtype = IORESOURCE_IO;
- else if (DT_FLAGS_TO_TYPE(flags) == DT_TYPE_MEM32)
- rtype = IORESOURCE_MEM;
- else
- continue;
+ for_each_of_range(&parser, &range) {
+ u32 slot = upper_32_bits(range.bus_addr);
- if (slot == PCI_SLOT(devfn) && type == rtype) {
- *tgt = DT_CPUADDR_TO_TARGET(cpuaddr);
- *attr = DT_CPUADDR_TO_ATTR(cpuaddr);
+ if (slot == PCI_SLOT(devfn) &&
+ type == (range.flags & IORESOURCE_TYPE_BITS)) {
+ *tgt = (range.parent_bus_addr >> 56) & 0xFF;
+ *attr = (range.parent_bus_addr >> 48) & 0xFF;
return 0;
}
}
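A minimal sketch of the generic range parser now used above; example_dump_ranges() is illustrative. Each struct of_range carries the decoded flags, bus address, parent (CPU-side) bus address and size, replacing the hand-rolled cell arithmetic.

#include <linux/of_address.h>

static void example_dump_ranges(struct device_node *np)
{
        struct of_range_parser parser;
        struct of_range range;

        if (of_pci_range_parser_init(&parser, np))
                return;

        for_each_of_range(&parser, &range)
                pr_info("bus %#llx -> parent %#llx, %#llx bytes, flags %#x\n",
                        range.bus_addr, range.parent_bus_addr,
                        range.size, range.flags);
}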
@@ -1362,11 +1340,9 @@ static int mvebu_pcie_parse_port(struct mvebu_pcie *pcie,
goto skip;
}
- ret = devm_add_action(dev, mvebu_pcie_port_clk_put, port);
- if (ret < 0) {
- clk_put(port->clk);
+ ret = devm_add_action_or_reset(dev, mvebu_pcie_port_clk_put, port);
+ if (ret < 0)
goto err;
- }
return 1;
@@ -1423,7 +1399,7 @@ static void mvebu_pcie_powerdown(struct mvebu_pcie_port *port)
}
/*
- * devm_of_pci_get_host_bridge_resources() only sets up translateable resources,
+ * devm_of_pci_get_host_bridge_resources() only sets up translatable resources,
* so we need extra resource setup parsing our special DT properties encoding
* the MEM and IO apertures.
*/
@@ -1649,7 +1625,7 @@ static int mvebu_pcie_probe(struct platform_device *pdev)
return pci_host_probe(bridge);
}
-static int mvebu_pcie_remove(struct platform_device *pdev)
+static void mvebu_pcie_remove(struct platform_device *pdev)
{
struct mvebu_pcie *pcie = platform_get_drvdata(pdev);
struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
@@ -1707,8 +1683,6 @@ static int mvebu_pcie_remove(struct platform_device *pdev)
/* Power down card and disable clocks. Must be the last step. */
mvebu_pcie_powerdown(port);
}
-
- return 0;
}
static const struct of_device_id mvebu_pcie_of_match_table[] = {
@@ -1718,6 +1692,7 @@ static const struct of_device_id mvebu_pcie_of_match_table[] = {
{ .compatible = "marvell,kirkwood-pcie", },
{},
};
+MODULE_DEVICE_TABLE(of, mvebu_pcie_of_match_table);
static const struct dev_pm_ops mvebu_pcie_pm_ops = {
NOIRQ_SYSTEM_SLEEP_PM_OPS(mvebu_pcie_suspend, mvebu_pcie_resume)
diff --git a/drivers/pci/controller/pci-rcar-gen2.c b/drivers/pci/controller/pci-rcar-gen2.c
index 839695791757..d29866485361 100644
--- a/drivers/pci/controller/pci-rcar-gen2.c
+++ b/drivers/pci/controller/pci-rcar-gen2.c
@@ -290,8 +290,7 @@ static int rcar_pci_probe(struct platform_device *pdev)
priv = pci_host_bridge_priv(bridge);
bridge->sysdata = priv;
- cfg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- reg = devm_ioremap_resource(dev, cfg_res);
+ reg = devm_platform_get_and_ioremap_resource(pdev, 0, &cfg_res);
if (IS_ERR(reg))
return PTR_ERR(reg);
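A sketch of the combined helper (example_probe() is hypothetical): one call replaces the platform_get_resource() + devm_ioremap_resource() pair and optionally hands back the underlying struct resource.

static int example_probe(struct platform_device *pdev)
{
        struct resource *cfg_res;
        void __iomem *reg;

        /* Fetch MEM resource 0 and devm-map it in one step */
        reg = devm_platform_get_and_ioremap_resource(pdev, 0, &cfg_res);
        if (IS_ERR(reg))
                return PTR_ERR(reg);

        dev_info(&pdev->dev, "mapped %pR\n", cfg_res);
        return 0;
}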
diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c
index 929f9363e94b..942ddfca3bf6 100644
--- a/drivers/pci/controller/pci-tegra.c
+++ b/drivers/pci/controller/pci-tegra.c
@@ -14,6 +14,7 @@
*/
#include <linux/clk.h>
+#include <linux/cleanup.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/export.h>
@@ -22,6 +23,7 @@
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -269,7 +271,7 @@ struct tegra_msi {
DECLARE_BITMAP(used, INT_PCI_MSI_NR);
struct irq_domain *domain;
struct mutex map_lock;
- spinlock_t mask_lock;
+ raw_spinlock_t mask_lock;
void *virt;
dma_addr_t phys;
int irq;
@@ -1330,12 +1332,9 @@ static struct phy *devm_of_phy_optional_get_index(struct device *dev,
if (!name)
return ERR_PTR(-ENOMEM);
- phy = devm_of_phy_get(dev, np, name);
+ phy = devm_of_phy_optional_get(dev, np, name);
kfree(name);
- if (PTR_ERR(phy) == -ENODEV)
- phy = NULL;
-
return phy;
}
@@ -1346,7 +1345,7 @@ static int tegra_pcie_port_get_phys(struct tegra_pcie_port *port)
unsigned int i;
int err;
- port->phys = devm_kcalloc(dev, sizeof(phy), port->lanes, GFP_KERNEL);
+ port->phys = devm_kcalloc(dev, port->lanes, sizeof(phy), GFP_KERNEL);
if (!port->phys)
return -ENOMEM;
@@ -1378,7 +1377,7 @@ static int tegra_pcie_phys_get(struct tegra_pcie *pcie)
struct tegra_pcie_port *port;
int err;
- if (!soc->has_gen2 || of_find_property(np, "phys", NULL) != NULL)
+ if (!soc->has_gen2 || of_property_present(np, "phys"))
return tegra_pcie_phys_get_legacy(pcie);
list_for_each_entry(port, &pcie->ports, list) {
@@ -1463,7 +1462,7 @@ static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
pcie->cs = *res;
/* constrain configuration space to 4 KiB */
- pcie->cs.end = pcie->cs.start + SZ_4K - 1;
+ resource_set_size(&pcie->cs, SZ_4K);
pcie->cfg = devm_ioremap_resource(dev, &pcie->cs);
if (IS_ERR(pcie->cfg)) {
@@ -1550,7 +1549,7 @@ static void tegra_pcie_msi_irq(struct irq_desc *desc)
unsigned int index = i * 32 + offset;
int ret;
- ret = generic_handle_domain_irq(msi->domain->parent, index);
+ ret = generic_handle_domain_irq(msi->domain, index);
if (ret) {
/*
* that's weird who triggered this?
@@ -1568,30 +1567,6 @@ static void tegra_pcie_msi_irq(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
-static void tegra_msi_top_irq_ack(struct irq_data *d)
-{
- irq_chip_ack_parent(d);
-}
-
-static void tegra_msi_top_irq_mask(struct irq_data *d)
-{
- pci_msi_mask_irq(d);
- irq_chip_mask_parent(d);
-}
-
-static void tegra_msi_top_irq_unmask(struct irq_data *d)
-{
- pci_msi_unmask_irq(d);
- irq_chip_unmask_parent(d);
-}
-
-static struct irq_chip tegra_msi_top_chip = {
- .name = "Tegra PCIe MSI",
- .irq_ack = tegra_msi_top_irq_ack,
- .irq_mask = tegra_msi_top_irq_mask,
- .irq_unmask = tegra_msi_top_irq_unmask,
-};
-
static void tegra_msi_irq_ack(struct irq_data *d)
{
struct tegra_msi *msi = irq_data_get_irq_chip_data(d);
@@ -1607,14 +1582,13 @@ static void tegra_msi_irq_mask(struct irq_data *d)
struct tegra_msi *msi = irq_data_get_irq_chip_data(d);
struct tegra_pcie *pcie = msi_to_pcie(msi);
unsigned int index = d->hwirq / 32;
- unsigned long flags;
u32 value;
- spin_lock_irqsave(&msi->mask_lock, flags);
- value = afi_readl(pcie, AFI_MSI_EN_VEC(index));
- value &= ~BIT(d->hwirq % 32);
- afi_writel(pcie, value, AFI_MSI_EN_VEC(index));
- spin_unlock_irqrestore(&msi->mask_lock, flags);
+ scoped_guard(raw_spinlock_irqsave, &msi->mask_lock) {
+ value = afi_readl(pcie, AFI_MSI_EN_VEC(index));
+ value &= ~BIT(d->hwirq % 32);
+ afi_writel(pcie, value, AFI_MSI_EN_VEC(index));
+ }
}
static void tegra_msi_irq_unmask(struct irq_data *d)
@@ -1622,19 +1596,13 @@ static void tegra_msi_irq_unmask(struct irq_data *d)
struct tegra_msi *msi = irq_data_get_irq_chip_data(d);
struct tegra_pcie *pcie = msi_to_pcie(msi);
unsigned int index = d->hwirq / 32;
- unsigned long flags;
u32 value;
- spin_lock_irqsave(&msi->mask_lock, flags);
- value = afi_readl(pcie, AFI_MSI_EN_VEC(index));
- value |= BIT(d->hwirq % 32);
- afi_writel(pcie, value, AFI_MSI_EN_VEC(index));
- spin_unlock_irqrestore(&msi->mask_lock, flags);
-}
-
-static int tegra_msi_set_affinity(struct irq_data *d, const struct cpumask *mask, bool force)
-{
- return -EINVAL;
+ scoped_guard(raw_spinlock_irqsave, &msi->mask_lock) {
+ value = afi_readl(pcie, AFI_MSI_EN_VEC(index));
+ value |= BIT(d->hwirq % 32);
+ afi_writel(pcie, value, AFI_MSI_EN_VEC(index));
+ }
}
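A minimal sketch of the scoped_guard() pattern used above, assuming <linux/cleanup.h>; example_set_bit_locked() is illustrative. The lock (with IRQ state saved) is held for exactly the braced block, so no "flags" variable or explicit unlock is needed. The lock is raw because mask/unmask can run in hard-IRQ context, including on PREEMPT_RT.

static void example_set_bit_locked(raw_spinlock_t *lock, u32 *reg, int bit)
{
        scoped_guard(raw_spinlock_irqsave, lock) {
                *reg |= BIT(bit);
        }
}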
static void tegra_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
@@ -1651,7 +1619,6 @@ static struct irq_chip tegra_msi_bottom_chip = {
.irq_ack = tegra_msi_irq_ack,
.irq_mask = tegra_msi_irq_mask,
.irq_unmask = tegra_msi_irq_unmask,
- .irq_set_affinity = tegra_msi_set_affinity,
.irq_compose_msi_msg = tegra_compose_msi_msg,
};
@@ -1699,42 +1666,40 @@ static const struct irq_domain_ops tegra_msi_domain_ops = {
.free = tegra_msi_domain_free,
};
-static struct msi_domain_info tegra_msi_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_PCI_MSIX),
- .chip = &tegra_msi_top_chip,
+static const struct msi_parent_ops tegra_msi_parent_ops = {
+ .supported_flags = (MSI_GENERIC_FLAGS_MASK |
+ MSI_FLAG_PCI_MSIX),
+ .required_flags = (MSI_FLAG_USE_DEF_DOM_OPS |
+ MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_PCI_MSI_MASK_PARENT |
+ MSI_FLAG_NO_AFFINITY),
+ .chip_flags = MSI_CHIP_FLAG_SET_ACK,
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
};
static int tegra_allocate_domains(struct tegra_msi *msi)
{
struct tegra_pcie *pcie = msi_to_pcie(msi);
struct fwnode_handle *fwnode = dev_fwnode(pcie->dev);
- struct irq_domain *parent;
-
- parent = irq_domain_create_linear(fwnode, INT_PCI_MSI_NR,
- &tegra_msi_domain_ops, msi);
- if (!parent) {
- dev_err(pcie->dev, "failed to create IRQ domain\n");
- return -ENOMEM;
- }
- irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS);
+ struct irq_domain_info info = {
+ .fwnode = fwnode,
+ .ops = &tegra_msi_domain_ops,
+ .size = INT_PCI_MSI_NR,
+ .host_data = msi,
+ };
- msi->domain = pci_msi_create_irq_domain(fwnode, &tegra_msi_info, parent);
+ msi->domain = msi_create_parent_irq_domain(&info, &tegra_msi_parent_ops);
if (!msi->domain) {
dev_err(pcie->dev, "failed to create MSI domain\n");
- irq_domain_remove(parent);
return -ENOMEM;
}
-
return 0;
}
static void tegra_free_domains(struct tegra_msi *msi)
{
- struct irq_domain *parent = msi->domain->parent;
-
irq_domain_remove(msi->domain);
- irq_domain_remove(parent);
}
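A sketch of the single-domain setup that replaces the old inner-domain + PCI/MSI top-domain pair; example_create_msi_parent() is illustrative, with the ops structures filled in as in the Tegra conversion above.

static struct irq_domain *
example_create_msi_parent(struct fwnode_handle *fwnode,
                          const struct irq_domain_ops *ops,
                          const struct msi_parent_ops *parent_ops,
                          unsigned int size, void *host_data)
{
        struct irq_domain_info info = {
                .fwnode         = fwnode,
                .ops            = ops,
                .size           = size,
                .host_data      = host_data,
        };

        /* One call creates the parent domain; per-device MSI domains
         * are instantiated on demand via parent_ops. */
        return msi_create_parent_irq_domain(&info, parent_ops);
}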
static int tegra_pcie_msi_setup(struct tegra_pcie *pcie)
@@ -1745,7 +1710,7 @@ static int tegra_pcie_msi_setup(struct tegra_pcie *pcie)
int err;
mutex_init(&msi->map_lock);
- spin_lock_init(&msi->mask_lock);
+ raw_spin_lock_init(&msi->mask_lock);
if (IS_ENABLED(CONFIG_PCI_MSI)) {
err = tegra_allocate_domains(msi);
@@ -1947,7 +1912,7 @@ static bool of_regulator_bulk_available(struct device_node *np,
for (i = 0; i < num_supplies; i++) {
snprintf(property, 32, "%s-supply", supplies[i].supply);
- if (of_find_property(np, property, NULL) == NULL)
+ if (!of_property_present(np, property))
return false;
}
@@ -2115,47 +2080,39 @@ static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
{
struct device *dev = pcie->dev;
- struct device_node *np = dev->of_node, *port;
+ struct device_node *np = dev->of_node;
const struct tegra_pcie_soc *soc = pcie->soc;
u32 lanes = 0, mask = 0;
unsigned int lane = 0;
int err;
/* parse root ports */
- for_each_child_of_node(np, port) {
+ for_each_child_of_node_scoped(np, port) {
struct tegra_pcie_port *rp;
unsigned int index;
u32 value;
char *label;
err = of_pci_get_devfn(port);
- if (err < 0) {
- dev_err(dev, "failed to parse address: %d\n", err);
- goto err_node_put;
- }
+ if (err < 0)
+ return dev_err_probe(dev, err, "failed to parse address\n");
index = PCI_SLOT(err);
- if (index < 1 || index > soc->num_ports) {
- dev_err(dev, "invalid port number: %d\n", index);
- err = -EINVAL;
- goto err_node_put;
- }
+ if (index < 1 || index > soc->num_ports)
+ return dev_err_probe(dev, -EINVAL,
+ "invalid port number: %d\n", index);
index--;
err = of_property_read_u32(port, "nvidia,num-lanes", &value);
- if (err < 0) {
- dev_err(dev, "failed to parse # of lanes: %d\n",
- err);
- goto err_node_put;
- }
+ if (err < 0)
+ return dev_err_probe(dev, err,
+ "failed to parse # of lanes\n");
- if (value > 16) {
- dev_err(dev, "invalid # of lanes: %u\n", value);
- err = -EINVAL;
- goto err_node_put;
- }
+ if (value > 16)
+ return dev_err_probe(dev, -EINVAL,
+ "invalid # of lanes: %u\n", value);
lanes |= value << (index << 3);
@@ -2168,16 +2125,12 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
lane += value;
rp = devm_kzalloc(dev, sizeof(*rp), GFP_KERNEL);
- if (!rp) {
- err = -ENOMEM;
- goto err_node_put;
- }
+ if (!rp)
+ return -ENOMEM;
err = of_address_to_resource(port, 0, &rp->regs);
- if (err < 0) {
- dev_err(dev, "failed to parse address: %d\n", err);
- goto err_node_put;
- }
+ if (err < 0)
+ return dev_err_probe(dev, err, "failed to parse address\n");
INIT_LIST_HEAD(&rp->list);
rp->index = index;
@@ -2186,16 +2139,12 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
rp->np = port;
rp->base = devm_pci_remap_cfg_resource(dev, &rp->regs);
- if (IS_ERR(rp->base)) {
- err = PTR_ERR(rp->base);
- goto err_node_put;
- }
+ if (IS_ERR(rp->base))
+ return PTR_ERR(rp->base);
label = devm_kasprintf(dev, GFP_KERNEL, "pex-reset-%u", index);
- if (!label) {
- err = -ENOMEM;
- goto err_node_put;
- }
+ if (!label)
+ return -ENOMEM;
/*
* Returns -ENOENT if reset-gpios property is not populated
@@ -2208,34 +2157,26 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
GPIOD_OUT_LOW,
label);
if (IS_ERR(rp->reset_gpio)) {
- if (PTR_ERR(rp->reset_gpio) == -ENOENT) {
+ if (PTR_ERR(rp->reset_gpio) == -ENOENT)
rp->reset_gpio = NULL;
- } else {
- dev_err(dev, "failed to get reset GPIO: %ld\n",
- PTR_ERR(rp->reset_gpio));
- err = PTR_ERR(rp->reset_gpio);
- goto err_node_put;
- }
+ else
+ return dev_err_probe(dev, PTR_ERR(rp->reset_gpio),
+ "failed to get reset GPIO\n");
}
list_add_tail(&rp->list, &pcie->ports);
}
err = tegra_pcie_get_xbar_config(pcie, lanes, &pcie->xbar_config);
- if (err < 0) {
- dev_err(dev, "invalid lane configuration\n");
- return err;
- }
+ if (err < 0)
+ return dev_err_probe(dev, err,
+ "invalid lane configuration\n");
err = tegra_pcie_get_regulators(pcie, mask);
if (err < 0)
return err;
return 0;
-
-err_node_put:
- of_node_put(port);
- return err;
}
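A minimal sketch of the helpers that allowed the err_node_put label to be dropped above; example_count_ports() is hypothetical. for_each_child_of_node_scoped() declares "port" with an automatic of_node_put() on every exit from the loop body, and dev_err_probe() folds the error message and return value into one statement.

static int example_count_ports(struct device *dev)
{
        int n = 0;

        for_each_child_of_node_scoped(dev->of_node, port) {
                if (!of_device_is_available(port))
                        continue;       /* "port" is put automatically */
                n++;
        }

        if (!n)
                return dev_err_probe(dev, -ENODEV, "no ports described\n");
        return n;
}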
/*
@@ -2683,7 +2624,7 @@ put_resources:
return err;
}
-static int tegra_pcie_remove(struct platform_device *pdev)
+static void tegra_pcie_remove(struct platform_device *pdev)
{
struct tegra_pcie *pcie = platform_get_drvdata(pdev);
struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
@@ -2704,8 +2645,6 @@ static int tegra_pcie_remove(struct platform_device *pdev)
list_for_each_entry_safe(port, tmp, &pcie->ports, list)
tegra_pcie_port_free(port);
-
- return 0;
}
static int tegra_pcie_pm_suspend(struct device *dev)
@@ -2814,4 +2753,3 @@ static struct platform_driver tegra_pcie_driver = {
.remove = tegra_pcie_remove,
};
module_platform_driver(tegra_pcie_driver);
-MODULE_LICENSE("GPL");
diff --git a/drivers/pci/controller/pci-thunder-ecam.c b/drivers/pci/controller/pci-thunder-ecam.c
index b5bd10a62adb..b5b4a958e6a2 100644
--- a/drivers/pci/controller/pci-thunder-ecam.c
+++ b/drivers/pci/controller/pci-thunder-ecam.c
@@ -11,6 +11,8 @@
#include <linux/pci-ecam.h>
#include <linux/platform_device.h>
+#include "pci-host-common.h"
+
#if defined(CONFIG_PCI_HOST_THUNDER_ECAM) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS))
static void set_val(u32 v, int where, int size, u32 *val)
@@ -204,7 +206,7 @@ static int thunder_ecam_config_read(struct pci_bus *bus, unsigned int devfn,
v = readl(addr);
if (v & 0xff00)
- pr_err("Bad MSIX cap header: %08x\n", v);
+ pr_err("Bad MSI-X cap header: %08x\n", v);
v |= 0xbc00; /* next capability is EA at 0xbc */
set_val(v, where, size, val);
return PCIBIOS_SUCCESSFUL;
diff --git a/drivers/pci/controller/pci-thunder-pem.c b/drivers/pci/controller/pci-thunder-pem.c
index 06a9855cb431..5fa037fb61dc 100644
--- a/drivers/pci/controller/pci-thunder-pem.c
+++ b/drivers/pci/controller/pci-thunder-pem.c
@@ -14,6 +14,7 @@
#include <linux/platform_device.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include "../pci.h"
+#include "pci-host-common.h"
#if defined(CONFIG_PCI_HOST_THUNDER_PEM) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS))
@@ -400,9 +401,9 @@ static int thunder_pem_acpi_init(struct pci_config_window *cfg)
* Reserve 64K size PEM specific resources. The full 16M range
* size is required for thunder_pem_init() call.
*/
- res_pem->end = res_pem->start + SZ_64K - 1;
+ resource_set_size(res_pem, SZ_64K);
thunder_pem_reserve_range(dev, root->segment, res_pem);
- res_pem->end = res_pem->start + SZ_16M - 1;
+ resource_set_size(res_pem, SZ_16M);
/* Reserve PCI configuration space as well. */
thunder_pem_reserve_range(dev, root->segment, &cfg->res);
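A sketch of the helper, assuming <linux/ioport.h> and <linux/sizes.h>: resource_set_size() keeps res->start and rewrites res->end so that resource_size(res) equals the requested size, replacing the off-by-one-prone "end = start + SZ_xxx - 1" arithmetic.

static void example_shrink_to_64k(struct resource *res)
{
        resource_set_size(res, SZ_64K);    /* end = start + SZ_64K - 1 */
}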
diff --git a/drivers/pci/controller/pci-v3-semi.c b/drivers/pci/controller/pci-v3-semi.c
index ca44b0c83d1b..460a825325dd 100644
--- a/drivers/pci/controller/pci-v3-semi.c
+++ b/drivers/pci/controller/pci-v3-semi.c
@@ -20,8 +20,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
@@ -736,8 +735,7 @@ static int v3_pci_probe(struct platform_device *pdev)
return ret;
}
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- v3->base = devm_ioremap_resource(dev, regs);
+ v3->base = devm_platform_get_and_ioremap_resource(pdev, 0, &regs);
if (IS_ERR(v3->base))
return PTR_ERR(v3->base);
/*
diff --git a/drivers/pci/controller/pci-versatile.c b/drivers/pci/controller/pci-versatile.c
index 7991d334e0f1..e9a6758fe2c1 100644
--- a/drivers/pci/controller/pci-versatile.c
+++ b/drivers/pci/controller/pci-versatile.c
@@ -169,4 +169,3 @@ static struct platform_driver versatile_pci_driver = {
module_platform_driver(versatile_pci_driver);
MODULE_DESCRIPTION("Versatile PCI driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/pci-xgene-msi.c b/drivers/pci/controller/pci-xgene-msi.c
index d7987b281f79..654639bccd10 100644
--- a/drivers/pci/controller/pci-xgene-msi.c
+++ b/drivers/pci/controller/pci-xgene-msi.c
@@ -6,12 +6,14 @@
* Author: Tanmay Inamdar <tinamdar@apm.com>
* Duc Dang <dhdang@apm.com>
*/
+#include <linux/bitfield.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/irqchip/chained_irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/of_pci.h>
@@ -21,46 +23,49 @@
#define IDX_PER_GROUP 8
#define IRQS_PER_IDX 16
#define NR_HW_IRQS 16
-#define NR_MSI_VEC (IDX_PER_GROUP * IRQS_PER_IDX * NR_HW_IRQS)
+#define NR_MSI_BITS (IDX_PER_GROUP * IRQS_PER_IDX * NR_HW_IRQS)
+#define NR_MSI_VEC (NR_MSI_BITS / num_possible_cpus())
-struct xgene_msi_group {
- struct xgene_msi *msi;
- int gic_irq;
- u32 msi_grp;
-};
+#define MSI_GROUP_MASK GENMASK(22, 19)
+#define MSI_INDEX_MASK GENMASK(18, 16)
+#define MSI_INTR_MASK GENMASK(19, 16)
+
+#define MSInRx_HWIRQ_MASK GENMASK(6, 4)
+#define DATA_HWIRQ_MASK GENMASK(3, 0)
struct xgene_msi {
- struct device_node *node;
struct irq_domain *inner_domain;
- struct irq_domain *msi_domain;
u64 msi_addr;
void __iomem *msi_regs;
unsigned long *bitmap;
struct mutex bitmap_lock;
- struct xgene_msi_group *msi_groups;
- int num_cpus;
+ unsigned int gic_irq[NR_HW_IRQS];
};
/* Global data */
-static struct xgene_msi xgene_msi_ctrl;
-
-static struct irq_chip xgene_msi_top_irq_chip = {
- .name = "X-Gene1 MSI",
- .irq_enable = pci_msi_unmask_irq,
- .irq_disable = pci_msi_mask_irq,
- .irq_mask = pci_msi_mask_irq,
- .irq_unmask = pci_msi_unmask_irq,
-};
-
-static struct msi_domain_info xgene_msi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_PCI_MSIX),
- .chip = &xgene_msi_top_irq_chip,
-};
+static struct xgene_msi *xgene_msi_ctrl;
/*
- * X-Gene v1 has 16 groups of MSI termination registers MSInIRx, where
- * n is group number (0..F), x is index of registers in each group (0..7)
+ * X-Gene v1 has 16 frames of MSI termination registers MSInIRx, where n is
+ * frame number (0..15), x is index of registers in each frame (0..7). Each
+ * 32b register is at the beginning of a 64kB region, each frame occupying
+ * 512kB (and the whole thing 8MB of PA space).
+ *
+ * Each register supports 16 MSI vectors (0..15) to generate interrupts. A
+ * write to the MSInIRx from the PCI side generates an interrupt. A read
+ * from the MSInIRx on the CPU side returns a bitmap of the pending MSIs in
+ * the lower 16 bits. A side effect of this read is that all pending
+ * interrupts are acknowledged and cleared.
+ *
+ * Additionally, each MSI termination frame has 1 MSIINTn register (n is
+ * 0..15) to indicate the MSI pending status caused by any of its 8
+ * termination registers, reported as a bitmap in the lower 8 bits. Each 32b
+ * register is at the beginning of a 64kB region (and overall occupying an
+ * extra 1MB).
+ *
+ * There is one GIC IRQ assigned for each MSI termination frame, 16 in
+ * total.
+ *
* The register layout is as follows:
* MSI0IR0 base_addr
* MSI0IR1 base_addr + 0x10000
@@ -81,107 +86,74 @@ static struct msi_domain_info xgene_msi_domain_info = {
* MSIINT1 base_addr + 0x810000
* ... ...
* MSIINTF base_addr + 0x8F0000
- *
- * Each index register supports 16 MSI vectors (0..15) to generate interrupt.
- * There are total 16 GIC IRQs assigned for these 16 groups of MSI termination
- * registers.
- *
- * Each MSI termination group has 1 MSIINTn register (n is 0..15) to indicate
- * the MSI pending status caused by 1 of its 8 index registers.
*/
/* MSInIRx read helper */
-static u32 xgene_msi_ir_read(struct xgene_msi *msi,
- u32 msi_grp, u32 msir_idx)
+static u32 xgene_msi_ir_read(struct xgene_msi *msi, u32 msi_grp, u32 msir_idx)
{
return readl_relaxed(msi->msi_regs + MSI_IR0 +
- (msi_grp << 19) + (msir_idx << 16));
+ (FIELD_PREP(MSI_GROUP_MASK, msi_grp) |
+ FIELD_PREP(MSI_INDEX_MASK, msir_idx)));
}
/* MSIINTn read helper */
static u32 xgene_msi_int_read(struct xgene_msi *msi, u32 msi_grp)
{
- return readl_relaxed(msi->msi_regs + MSI_INT0 + (msi_grp << 16));
+ return readl_relaxed(msi->msi_regs + MSI_INT0 +
+ FIELD_PREP(MSI_INTR_MASK, msi_grp));
}
/*
- * With 2048 MSI vectors supported, the MSI message can be constructed using
- * following scheme:
- * - Divide into 8 256-vector groups
- * Group 0: 0-255
- * Group 1: 256-511
- * Group 2: 512-767
- * ...
- * Group 7: 1792-2047
- * - Each 256-vector group is divided into 16 16-vector groups
- * As an example: 16 16-vector groups for 256-vector group 0-255 is
- * Group 0: 0-15
- * Group 1: 16-32
- * ...
- * Group 15: 240-255
- * - The termination address of MSI vector in 256-vector group n and 16-vector
- * group x is the address of MSIxIRn
- * - The data for MSI vector in 16-vector group x is x
+ * In order to allow an MSI to be moved from one CPU to another without
+ * having to repaint both the address and the data (which cannot be done
+ * atomically), we statically partition the MSI frames between CPUs. Given
+ * that XGene-1 has 8 CPUs, each CPU gets two frames assigned to it.
+ *
+ * We adopt the convention that when an MSI is moved, it is configured to
+ * target the same register number in the congruent frame assigned to the
+ * new target CPU. This reserves a given MSI across all CPUs, and reduces
+ * the MSI capacity from 2048 to 256.
+ *
+ * Effectively, this amounts to:
+ * - hwirq[7]::cpu[2:0] is the target frame number (n in MSInIRx)
+ * - hwirq[6:4] is the register index in any given frame (x in MSInIRx)
+ * - hwirq[3:0] is the MSI data
*/
-static u32 hwirq_to_reg_set(unsigned long hwirq)
+static irq_hw_number_t compute_hwirq(u8 frame, u8 index, u8 data)
{
- return (hwirq / (NR_HW_IRQS * IRQS_PER_IDX));
-}
-
-static u32 hwirq_to_group(unsigned long hwirq)
-{
- return (hwirq % NR_HW_IRQS);
-}
-
-static u32 hwirq_to_msi_data(unsigned long hwirq)
-{
- return ((hwirq / NR_HW_IRQS) % IRQS_PER_IDX);
+ return (FIELD_PREP(BIT(7), FIELD_GET(BIT(3), frame)) |
+ FIELD_PREP(MSInRx_HWIRQ_MASK, index) |
+ FIELD_PREP(DATA_HWIRQ_MASK, data));
}
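The inverse of compute_hwirq() above, shown for clarity (example_decode_hwirq() is illustrative): hwirq[7] supplies frame bit 3, the target CPU number supplies frame bits 2:0, and the remaining fields fall out with FIELD_GET().

static void example_decode_hwirq(irq_hw_number_t hwirq, unsigned int cpu,
                                 u8 *frame, u8 *index, u8 *data)
{
        /* Recombine the frame-select bit with the CPU's frame number */
        *frame = FIELD_PREP(BIT(3), FIELD_GET(BIT(7), hwirq)) | cpu;
        *index = FIELD_GET(MSInRx_HWIRQ_MASK, hwirq);
        *data  = FIELD_GET(DATA_HWIRQ_MASK, hwirq);
}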
static void xgene_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
struct xgene_msi *msi = irq_data_get_irq_chip_data(data);
- u32 reg_set = hwirq_to_reg_set(data->hwirq);
- u32 group = hwirq_to_group(data->hwirq);
- u64 target_addr = msi->msi_addr + (((8 * group) + reg_set) << 16);
+ u64 target_addr;
+ u32 frame, msir;
+ int cpu;
- msg->address_hi = upper_32_bits(target_addr);
- msg->address_lo = lower_32_bits(target_addr);
- msg->data = hwirq_to_msi_data(data->hwirq);
-}
+ cpu = cpumask_first(irq_data_get_effective_affinity_mask(data));
+ msir = FIELD_GET(MSInRx_HWIRQ_MASK, data->hwirq);
+ frame = FIELD_PREP(BIT(3), FIELD_GET(BIT(7), data->hwirq)) | cpu;
-/*
- * X-Gene v1 only has 16 MSI GIC IRQs for 2048 MSI vectors. To maintain
- * the expected behaviour of .set_affinity for each MSI interrupt, the 16
- * MSI GIC IRQs are statically allocated to 8 X-Gene v1 cores (2 GIC IRQs
- * for each core). The MSI vector is moved from 1 MSI GIC IRQ to another
- * MSI GIC IRQ to steer its MSI interrupt to correct X-Gene v1 core. As a
- * consequence, the total MSI vectors that X-Gene v1 supports will be
- * reduced to 256 (2048/8) vectors.
- */
-static int hwirq_to_cpu(unsigned long hwirq)
-{
- return (hwirq % xgene_msi_ctrl.num_cpus);
-}
+ target_addr = msi->msi_addr;
+ target_addr += (FIELD_PREP(MSI_GROUP_MASK, frame) |
+ FIELD_PREP(MSI_INTR_MASK, msir));
-static unsigned long hwirq_to_canonical_hwirq(unsigned long hwirq)
-{
- return (hwirq - hwirq_to_cpu(hwirq));
+ msg->address_hi = upper_32_bits(target_addr);
+ msg->address_lo = lower_32_bits(target_addr);
+ msg->data = FIELD_GET(DATA_HWIRQ_MASK, data->hwirq);
}
static int xgene_msi_set_affinity(struct irq_data *irqdata,
const struct cpumask *mask, bool force)
{
int target_cpu = cpumask_first(mask);
- int curr_cpu;
-
- curr_cpu = hwirq_to_cpu(irqdata->hwirq);
- if (curr_cpu == target_cpu)
- return IRQ_SET_MASK_OK_DONE;
- /* Update MSI number to target the new CPU */
- irqdata->hwirq = hwirq_to_canonical_hwirq(irqdata->hwirq) + target_cpu;
+ irq_data_update_effective_affinity(irqdata, cpumask_of(target_cpu));
+ /* Force the core code to regenerate the message */
return IRQ_SET_MASK_OK;
}
@@ -195,25 +167,23 @@ static int xgene_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs, void *args)
{
struct xgene_msi *msi = domain->host_data;
- int msi_irq;
+ irq_hw_number_t hwirq;
mutex_lock(&msi->bitmap_lock);
- msi_irq = bitmap_find_next_zero_area(msi->bitmap, NR_MSI_VEC, 0,
- msi->num_cpus, 0);
- if (msi_irq < NR_MSI_VEC)
- bitmap_set(msi->bitmap, msi_irq, msi->num_cpus);
- else
- msi_irq = -ENOSPC;
+ hwirq = find_first_zero_bit(msi->bitmap, NR_MSI_VEC);
+ if (hwirq < NR_MSI_VEC)
+ set_bit(hwirq, msi->bitmap);
mutex_unlock(&msi->bitmap_lock);
- if (msi_irq < 0)
- return msi_irq;
+ if (hwirq >= NR_MSI_VEC)
+ return -ENOSPC;
- irq_domain_set_info(domain, virq, msi_irq,
+ irq_domain_set_info(domain, virq, hwirq,
&xgene_msi_bottom_irq_chip, domain->host_data,
handle_simple_irq, NULL, NULL);
+ irqd_set_resend_when_in_progress(irq_get_irq_data(virq));
return 0;
}
@@ -223,206 +193,149 @@ static void xgene_irq_domain_free(struct irq_domain *domain,
{
struct irq_data *d = irq_domain_get_irq_data(domain, virq);
struct xgene_msi *msi = irq_data_get_irq_chip_data(d);
- u32 hwirq;
mutex_lock(&msi->bitmap_lock);
- hwirq = hwirq_to_canonical_hwirq(d->hwirq);
- bitmap_clear(msi->bitmap, hwirq, msi->num_cpus);
+ clear_bit(d->hwirq, msi->bitmap);
mutex_unlock(&msi->bitmap_lock);
irq_domain_free_irqs_parent(domain, virq, nr_irqs);
}
-static const struct irq_domain_ops msi_domain_ops = {
+static const struct irq_domain_ops xgene_msi_domain_ops = {
.alloc = xgene_irq_domain_alloc,
.free = xgene_irq_domain_free,
};
-static int xgene_allocate_domains(struct xgene_msi *msi)
-{
- msi->inner_domain = irq_domain_add_linear(NULL, NR_MSI_VEC,
- &msi_domain_ops, msi);
- if (!msi->inner_domain)
- return -ENOMEM;
-
- msi->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(msi->node),
- &xgene_msi_domain_info,
- msi->inner_domain);
-
- if (!msi->msi_domain) {
- irq_domain_remove(msi->inner_domain);
- return -ENOMEM;
- }
-
- return 0;
-}
+static const struct msi_parent_ops xgene_msi_parent_ops = {
+ .supported_flags = (MSI_GENERIC_FLAGS_MASK |
+ MSI_FLAG_PCI_MSIX),
+ .required_flags = (MSI_FLAG_USE_DEF_DOM_OPS |
+ MSI_FLAG_USE_DEF_CHIP_OPS),
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
+};
-static void xgene_free_domains(struct xgene_msi *msi)
+static int xgene_allocate_domains(struct device_node *node,
+ struct xgene_msi *msi)
{
- if (msi->msi_domain)
- irq_domain_remove(msi->msi_domain);
- if (msi->inner_domain)
- irq_domain_remove(msi->inner_domain);
+ struct irq_domain_info info = {
+ .fwnode = of_fwnode_handle(node),
+ .ops = &xgene_msi_domain_ops,
+ .size = NR_MSI_VEC,
+ .host_data = msi,
+ };
+
+ msi->inner_domain = msi_create_parent_irq_domain(&info, &xgene_msi_parent_ops);
+ return msi->inner_domain ? 0 : -ENOMEM;
}
-static int xgene_msi_init_allocator(struct xgene_msi *xgene_msi)
+static int xgene_msi_init_allocator(struct device *dev)
{
- xgene_msi->bitmap = bitmap_zalloc(NR_MSI_VEC, GFP_KERNEL);
- if (!xgene_msi->bitmap)
+ xgene_msi_ctrl->bitmap = devm_bitmap_zalloc(dev, NR_MSI_VEC, GFP_KERNEL);
+ if (!xgene_msi_ctrl->bitmap)
return -ENOMEM;
- mutex_init(&xgene_msi->bitmap_lock);
-
- xgene_msi->msi_groups = kcalloc(NR_HW_IRQS,
- sizeof(struct xgene_msi_group),
- GFP_KERNEL);
- if (!xgene_msi->msi_groups)
- return -ENOMEM;
+ mutex_init(&xgene_msi_ctrl->bitmap_lock);
return 0;
}
static void xgene_msi_isr(struct irq_desc *desc)
{
+ unsigned int *irqp = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
- struct xgene_msi_group *msi_groups;
- struct xgene_msi *xgene_msi;
- int msir_index, msir_val, hw_irq, ret;
- u32 intr_index, grp_select, msi_grp;
+ struct xgene_msi *xgene_msi = xgene_msi_ctrl;
+ unsigned long grp_pending;
+ int msir_idx;
+ u32 msi_grp;
chained_irq_enter(chip, desc);
- msi_groups = irq_desc_get_handler_data(desc);
- xgene_msi = msi_groups->msi;
- msi_grp = msi_groups->msi_grp;
-
- /*
- * MSIINTn (n is 0..F) indicates if there is a pending MSI interrupt
- * If bit x of this register is set (x is 0..7), one or more interrupts
- * corresponding to MSInIRx is set.
- */
- grp_select = xgene_msi_int_read(xgene_msi, msi_grp);
- while (grp_select) {
- msir_index = ffs(grp_select) - 1;
- /*
- * Calculate MSInIRx address to read to check for interrupts
- * (refer to termination address and data assignment
- * described in xgene_compose_msi_msg() )
- */
- msir_val = xgene_msi_ir_read(xgene_msi, msi_grp, msir_index);
- while (msir_val) {
- intr_index = ffs(msir_val) - 1;
- /*
- * Calculate MSI vector number (refer to the termination
- * address and data assignment described in
- * xgene_compose_msi_msg function)
- */
- hw_irq = (((msir_index * IRQS_PER_IDX) + intr_index) *
- NR_HW_IRQS) + msi_grp;
- /*
- * As we have multiple hw_irq that maps to single MSI,
- * always look up the virq using the hw_irq as seen from
- * CPU0
- */
- hw_irq = hwirq_to_canonical_hwirq(hw_irq);
- ret = generic_handle_domain_irq(xgene_msi->inner_domain, hw_irq);
+ msi_grp = irqp - xgene_msi->gic_irq;
+
+ grp_pending = xgene_msi_int_read(xgene_msi, msi_grp);
+
+ for_each_set_bit(msir_idx, &grp_pending, IDX_PER_GROUP) {
+ unsigned long msir;
+ int intr_idx;
+
+ msir = xgene_msi_ir_read(xgene_msi, msi_grp, msir_idx);
+
+ for_each_set_bit(intr_idx, &msir, IRQS_PER_IDX) {
+ irq_hw_number_t hwirq;
+ int ret;
+
+ hwirq = compute_hwirq(msi_grp, msir_idx, intr_idx);
+ ret = generic_handle_domain_irq(xgene_msi->inner_domain,
+ hwirq);
WARN_ON_ONCE(ret);
- msir_val &= ~(1 << intr_index);
- }
- grp_select &= ~(1 << msir_index);
-
- if (!grp_select) {
- /*
- * We handled all interrupts happened in this group,
- * resample this group MSI_INTx register in case
- * something else has been made pending in the meantime
- */
- grp_select = xgene_msi_int_read(xgene_msi, msi_grp);
}
}
chained_irq_exit(chip, desc);
}
-static enum cpuhp_state pci_xgene_online;
-
-static int xgene_msi_remove(struct platform_device *pdev)
+static void xgene_msi_remove(struct platform_device *pdev)
{
- struct xgene_msi *msi = platform_get_drvdata(pdev);
-
- if (pci_xgene_online)
- cpuhp_remove_state(pci_xgene_online);
- cpuhp_remove_state(CPUHP_PCI_XGENE_DEAD);
-
- kfree(msi->msi_groups);
-
- bitmap_free(msi->bitmap);
- msi->bitmap = NULL;
-
- xgene_free_domains(msi);
+ for (int i = 0; i < NR_HW_IRQS; i++) {
+ unsigned int irq = xgene_msi_ctrl->gic_irq[i];
+ if (!irq)
+ continue;
+ irq_set_chained_handler_and_data(irq, NULL, NULL);
+ }
- return 0;
+ if (xgene_msi_ctrl->inner_domain)
+ irq_domain_remove(xgene_msi_ctrl->inner_domain);
}
-static int xgene_msi_hwirq_alloc(unsigned int cpu)
+static int xgene_msi_handler_setup(struct platform_device *pdev)
{
- struct xgene_msi *msi = &xgene_msi_ctrl;
- struct xgene_msi_group *msi_group;
- cpumask_var_t mask;
+ struct xgene_msi *xgene_msi = xgene_msi_ctrl;
int i;
- int err;
- for (i = cpu; i < NR_HW_IRQS; i += msi->num_cpus) {
- msi_group = &msi->msi_groups[i];
- if (!msi_group->gic_irq)
- continue;
+ for (i = 0; i < NR_HW_IRQS; i++) {
+ u32 msi_val;
+ int irq, err;
+
+ /*
+ * MSInIRx registers are read-to-clear; before registering
+ * interrupt handlers, read all of them to clear spurious
+ * interrupts that may occur before the driver is probed.
+ */
+ for (int msi_idx = 0; msi_idx < IDX_PER_GROUP; msi_idx++)
+ xgene_msi_ir_read(xgene_msi, i, msi_idx);
+
+ /* Read MSIINTn to confirm */
+ msi_val = xgene_msi_int_read(xgene_msi, i);
+ if (msi_val) {
+ dev_err(&pdev->dev, "Failed to clear spurious IRQ\n");
+ return -EINVAL;
+ }
+
+ irq = platform_get_irq(pdev, i);
+ if (irq < 0)
+ return irq;
- irq_set_chained_handler_and_data(msi_group->gic_irq,
- xgene_msi_isr, msi_group);
+ xgene_msi->gic_irq[i] = irq;
/*
* Statically allocate MSI GIC IRQs to each CPU core.
* With 8-core X-Gene v1, 2 MSI GIC IRQs are allocated
* to each core.
*/
- if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
- cpumask_clear(mask);
- cpumask_set_cpu(cpu, mask);
- err = irq_set_affinity(msi_group->gic_irq, mask);
- if (err)
- pr_err("failed to set affinity for GIC IRQ");
- free_cpumask_var(mask);
- } else {
- pr_err("failed to alloc CPU mask for affinity\n");
- err = -EINVAL;
- }
-
+ irq_set_status_flags(irq, IRQ_NO_BALANCING);
+ err = irq_set_affinity(irq, cpumask_of(i % num_possible_cpus()));
if (err) {
- irq_set_chained_handler_and_data(msi_group->gic_irq,
- NULL, NULL);
+ pr_err("failed to set affinity for GIC IRQ");
return err;
}
- }
-
- return 0;
-}
-
-static int xgene_msi_hwirq_free(unsigned int cpu)
-{
- struct xgene_msi *msi = &xgene_msi_ctrl;
- struct xgene_msi_group *msi_group;
- int i;
-
- for (i = cpu; i < NR_HW_IRQS; i += msi->num_cpus) {
- msi_group = &msi->msi_groups[i];
- if (!msi_group->gic_irq)
- continue;
- irq_set_chained_handler_and_data(msi_group->gic_irq, NULL,
- NULL);
+ irq_set_chained_handler_and_data(irq, xgene_msi_isr,
+ &xgene_msi_ctrl->gic_irq[i]);
}
+
return 0;
}
@@ -434,82 +347,42 @@ static const struct of_device_id xgene_msi_match_table[] = {
static int xgene_msi_probe(struct platform_device *pdev)
{
struct resource *res;
- int rc, irq_index;
struct xgene_msi *xgene_msi;
- int virt_msir;
- u32 msi_val, msi_idx;
+ int rc;
- xgene_msi = &xgene_msi_ctrl;
+ xgene_msi_ctrl = devm_kzalloc(&pdev->dev, sizeof(*xgene_msi_ctrl),
+ GFP_KERNEL);
+ if (!xgene_msi_ctrl)
+ return -ENOMEM;
- platform_set_drvdata(pdev, xgene_msi);
+ xgene_msi = xgene_msi_ctrl;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- xgene_msi->msi_regs = devm_ioremap_resource(&pdev->dev, res);
+ xgene_msi->msi_regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(xgene_msi->msi_regs)) {
rc = PTR_ERR(xgene_msi->msi_regs);
goto error;
}
xgene_msi->msi_addr = res->start;
- xgene_msi->node = pdev->dev.of_node;
- xgene_msi->num_cpus = num_possible_cpus();
- rc = xgene_msi_init_allocator(xgene_msi);
+ rc = xgene_msi_init_allocator(&pdev->dev);
if (rc) {
dev_err(&pdev->dev, "Error allocating MSI bitmap\n");
goto error;
}
- rc = xgene_allocate_domains(xgene_msi);
+ rc = xgene_allocate_domains(dev_of_node(&pdev->dev), xgene_msi);
if (rc) {
dev_err(&pdev->dev, "Failed to allocate MSI domain\n");
goto error;
}
- for (irq_index = 0; irq_index < NR_HW_IRQS; irq_index++) {
- virt_msir = platform_get_irq(pdev, irq_index);
- if (virt_msir < 0) {
- rc = virt_msir;
- goto error;
- }
- xgene_msi->msi_groups[irq_index].gic_irq = virt_msir;
- xgene_msi->msi_groups[irq_index].msi_grp = irq_index;
- xgene_msi->msi_groups[irq_index].msi = xgene_msi;
- }
-
- /*
- * MSInIRx registers are read-to-clear; before registering
- * interrupt handlers, read all of them to clear spurious
- * interrupts that may occur before the driver is probed.
- */
- for (irq_index = 0; irq_index < NR_HW_IRQS; irq_index++) {
- for (msi_idx = 0; msi_idx < IDX_PER_GROUP; msi_idx++)
- xgene_msi_ir_read(xgene_msi, irq_index, msi_idx);
-
- /* Read MSIINTn to confirm */
- msi_val = xgene_msi_int_read(xgene_msi, irq_index);
- if (msi_val) {
- dev_err(&pdev->dev, "Failed to clear spurious IRQ\n");
- rc = -EINVAL;
- goto error;
- }
- }
-
- rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "pci/xgene:online",
- xgene_msi_hwirq_alloc, NULL);
- if (rc < 0)
- goto err_cpuhp;
- pci_xgene_online = rc;
- rc = cpuhp_setup_state(CPUHP_PCI_XGENE_DEAD, "pci/xgene:dead", NULL,
- xgene_msi_hwirq_free);
+ rc = xgene_msi_handler_setup(pdev);
if (rc)
- goto err_cpuhp;
+ goto error;
dev_info(&pdev->dev, "APM X-Gene PCIe MSI driver loaded\n");
return 0;
-
-err_cpuhp:
- dev_err(&pdev->dev, "failed to add CPU MSI notifier\n");
error:
xgene_msi_remove(pdev);
return rc;
@@ -523,9 +396,4 @@ static struct platform_driver xgene_msi_driver = {
.probe = xgene_msi_probe,
.remove = xgene_msi_remove,
};
-
-static int __init xgene_pcie_msi_init(void)
-{
- return platform_driver_register(&xgene_msi_driver);
-}
-subsys_initcall(xgene_pcie_msi_init);
+builtin_platform_driver(xgene_msi_driver);
diff --git a/drivers/pci/controller/pci-xgene.c b/drivers/pci/controller/pci-xgene.c
index 887b4941ff32..b95afa35201d 100644
--- a/drivers/pci/controller/pci-xgene.c
+++ b/drivers/pci/controller/pci-xgene.c
@@ -12,6 +12,7 @@
#include <linux/jiffies.h>
#include <linux/memblock.h>
#include <linux/init.h>
+#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
@@ -53,11 +54,9 @@
#define XGENE_V1_PCI_EXP_CAP 0x40
/* PCIe IP version */
-#define XGENE_PCIE_IP_VER_UNKN 0
#define XGENE_PCIE_IP_VER_1 1
#define XGENE_PCIE_IP_VER_2 2
-#if defined(CONFIG_PCI_XGENE) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS))
struct xgene_pcie {
struct device_node *node;
struct device *dev;
@@ -163,31 +162,31 @@ static int xgene_pcie_config_read32(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 *val)
{
struct xgene_pcie *port = pcie_bus_to_port(bus);
+ int ret;
- if (pci_generic_config_read32(bus, devfn, where & ~0x3, 4, val) !=
- PCIBIOS_SUCCESSFUL)
- return PCIBIOS_DEVICE_NOT_FOUND;
+ ret = pci_generic_config_read32(bus, devfn, where & ~0x3, 4, val);
+ if (ret != PCIBIOS_SUCCESSFUL)
+ return ret;
/*
* The v1 controller has a bug in its Configuration Request Retry
- * Status (CRS) logic: when CRS Software Visibility is enabled and
+ * Status (RRS) logic: when RRS Software Visibility is enabled and
* we read the Vendor and Device ID of a non-existent device, the
* controller fabricates return data of 0xFFFF0001 ("device exists
* but is not ready") instead of 0xFFFFFFFF (PCI_ERROR_RESPONSE)
* ("device does not exist"). This causes the PCI core to retry
* the read until it times out. Avoid this by not claiming to
- * support CRS SV.
+ * support RRS SV.
*/
if (pci_is_root_bus(bus) && (port->version == XGENE_PCIE_IP_VER_1) &&
((where & ~0x3) == XGENE_V1_PCI_EXP_CAP + PCI_EXP_RTCTL))
- *val &= ~(PCI_EXP_RTCAP_CRSVIS << 16);
+ *val &= ~(PCI_EXP_RTCAP_RRS_SV << 16);
if (size <= 2)
*val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1);
return PCIBIOS_SUCCESSFUL;
}
-#endif
#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
static int xgene_get_csr_resource(struct acpi_device *adev,
@@ -278,7 +277,6 @@ const struct pci_ecam_ops xgene_v2_pcie_ecam_ops = {
};
#endif
-#if defined(CONFIG_PCI_XGENE)
static u64 xgene_pcie_set_ib_mask(struct xgene_pcie *port, u32 addr,
u32 flags, u64 size)
{
@@ -593,6 +591,24 @@ static struct pci_ops xgene_pcie_ops = {
.write = pci_generic_config_write32,
};
+static bool xgene_check_pcie_msi_ready(void)
+{
+ struct device_node *np;
+ struct irq_domain *d;
+
+ if (!IS_ENABLED(CONFIG_PCI_XGENE_MSI))
+ return true;
+
+ np = of_find_compatible_node(NULL, NULL, "apm,xgene1-msi");
+ if (!np)
+ return true;
+
+ d = irq_find_matching_host(np, DOMAIN_BUS_PCI_MSI);
+ of_node_put(np);
+
+ return d && irq_domain_is_msi_parent(d);
+}
+
static int xgene_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -601,6 +617,10 @@ static int xgene_pcie_probe(struct platform_device *pdev)
struct pci_host_bridge *bridge;
int ret;
+ if (!xgene_check_pcie_msi_ready())
+ return dev_err_probe(&pdev->dev, -EPROBE_DEFER,
+ "MSI driver not ready\n");
+
bridge = devm_pci_alloc_host_bridge(dev, sizeof(*port));
if (!bridge)
return -ENOMEM;
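A sketch of the deferral pattern used by xgene_pcie_probe() above (example_need_msi_parent() is illustrative): until the apm,xgene1-msi driver has registered a parent MSI domain, the host bridge returns -EPROBE_DEFER so the core retries it later.

static int example_need_msi_parent(struct device *dev,
                                   struct device_node *msi_np)
{
        struct irq_domain *d;

        d = irq_find_matching_host(msi_np, DOMAIN_BUS_PCI_MSI);
        if (!d || !irq_domain_is_msi_parent(d))
                return dev_err_probe(dev, -EPROBE_DEFER,
                                     "MSI driver not ready\n");
        return 0;
}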
@@ -609,10 +629,7 @@ static int xgene_pcie_probe(struct platform_device *pdev)
port->node = of_node_get(dn);
port->dev = dev;
-
- port->version = XGENE_PCIE_IP_VER_UNKN;
- if (of_device_is_compatible(port->node, "apm,xgene-pcie"))
- port->version = XGENE_PCIE_IP_VER_1;
+ port->version = XGENE_PCIE_IP_VER_1;
ret = xgene_pcie_map_reg(port, pdev);
if (ret)
@@ -646,4 +663,3 @@ static struct platform_driver xgene_pcie_driver = {
.probe = xgene_pcie_probe,
};
builtin_platform_driver(xgene_pcie_driver);
-#endif
diff --git a/drivers/pci/controller/pcie-altera-msi.c b/drivers/pci/controller/pcie-altera-msi.c
index 65e8a20cc442..ea2ca2e70f20 100644
--- a/drivers/pci/controller/pcie-altera-msi.c
+++ b/drivers/pci/controller/pcie-altera-msi.c
@@ -9,6 +9,7 @@
#include <linux/interrupt.h>
#include <linux/irqchip/chained_irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqdomain.h>
#include <linux/init.h>
#include <linux/module.h>
@@ -29,7 +30,6 @@ struct altera_msi {
DECLARE_BITMAP(used, MAX_MSI_VECTORS);
struct mutex lock; /* protect "used" bitmap */
struct platform_device *pdev;
- struct irq_domain *msi_domain;
struct irq_domain *inner_domain;
void __iomem *csr_base;
void __iomem *vector_base;
@@ -74,18 +74,20 @@ static void altera_msi_isr(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
-static struct irq_chip altera_msi_irq_chip = {
- .name = "Altera PCIe MSI",
- .irq_mask = pci_msi_mask_irq,
- .irq_unmask = pci_msi_unmask_irq,
-};
+#define ALTERA_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_NO_AFFINITY)
-static struct msi_domain_info altera_msi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_PCI_MSIX),
- .chip = &altera_msi_irq_chip,
-};
+#define ALTERA_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
+ MSI_FLAG_PCI_MSIX)
+static const struct msi_parent_ops altera_msi_parent_ops = {
+ .required_flags = ALTERA_MSI_FLAGS_REQUIRED,
+ .supported_flags = ALTERA_MSI_FLAGS_SUPPORTED,
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .prefix = "Altera-",
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
+};
static void altera_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
struct altera_msi *msi = irq_data_get_irq_chip_data(data);
@@ -99,16 +101,9 @@ static void altera_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
(int)data->hwirq, msg->address_hi, msg->address_lo);
}
-static int altera_msi_set_affinity(struct irq_data *irq_data,
- const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
static struct irq_chip altera_msi_bottom_irq_chip = {
.name = "Altera MSI",
.irq_compose_msi_msg = altera_compose_msi_msg,
- .irq_set_affinity = altera_msi_set_affinity,
};
static int altera_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
@@ -171,20 +166,16 @@ static const struct irq_domain_ops msi_domain_ops = {
static int altera_allocate_domains(struct altera_msi *msi)
{
- struct fwnode_handle *fwnode = of_node_to_fwnode(msi->pdev->dev.of_node);
-
- msi->inner_domain = irq_domain_add_linear(NULL, msi->num_of_vectors,
- &msi_domain_ops, msi);
+ struct irq_domain_info info = {
+ .fwnode = dev_fwnode(&msi->pdev->dev),
+ .ops = &msi_domain_ops,
+ .host_data = msi,
+ .size = msi->num_of_vectors,
+ };
+
+ msi->inner_domain = msi_create_parent_irq_domain(&info, &altera_msi_parent_ops);
if (!msi->inner_domain) {
- dev_err(&msi->pdev->dev, "failed to create IRQ domain\n");
- return -ENOMEM;
- }
-
- msi->msi_domain = pci_msi_create_irq_domain(fwnode,
- &altera_msi_domain_info, msi->inner_domain);
- if (!msi->msi_domain) {
dev_err(&msi->pdev->dev, "failed to create MSI domain\n");
- irq_domain_remove(msi->inner_domain);
return -ENOMEM;
}
@@ -193,11 +184,10 @@ static int altera_allocate_domains(struct altera_msi *msi)
static void altera_free_domains(struct altera_msi *msi)
{
- irq_domain_remove(msi->msi_domain);
irq_domain_remove(msi->inner_domain);
}
-static int altera_msi_remove(struct platform_device *pdev)
+static void altera_msi_remove(struct platform_device *pdev)
{
struct altera_msi *msi = platform_get_drvdata(pdev);
@@ -207,7 +197,6 @@ static int altera_msi_remove(struct platform_device *pdev)
altera_free_domains(msi);
platform_set_drvdata(pdev, NULL);
- return 0;
}
static int altera_msi_probe(struct platform_device *pdev)
@@ -291,4 +280,5 @@ static void __exit altera_msi_exit(void)
subsys_initcall(altera_msi_init);
MODULE_DEVICE_TABLE(of, altera_msi_of_match);
module_exit(altera_msi_exit);
+MODULE_DESCRIPTION("Altera PCIe MSI support driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/pcie-altera.c b/drivers/pci/controller/pcie-altera.c
index 18b2361d6462..3dbb7adc421c 100644
--- a/drivers/pci/controller/pcie-altera.c
+++ b/drivers/pci/controller/pcie-altera.c
@@ -6,14 +6,14 @@
* Description: Altera PCIe host controller driver
*/
+#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
#include <linux/init.h>
#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
+#include <linux/of.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
@@ -56,12 +56,11 @@
#define TLP_READ_TAG 0x1d
#define TLP_WRITE_TAG 0x10
#define RP_DEVFN 0
-#define TLP_REQ_ID(bus, devfn) (((bus) << 8) | (devfn))
#define TLP_CFG_DW0(pcie, cfg) \
(((cfg) << 24) | \
TLP_PAYLOAD_SIZE)
#define TLP_CFG_DW1(pcie, tag, be) \
- (((TLP_REQ_ID(pcie->root_bus_nr, RP_DEVFN)) << 16) | (tag << 8) | (be))
+ (((PCI_DEVID(pcie->root_bus_nr, RP_DEVFN)) << 16) | (tag << 8) | (be))
#define TLP_CFG_DW2(bus, devfn, offset) \
(((bus) << 24) | ((devfn) << 16) | (offset))
#define TLP_COMP_STATUS(s) (((s) >> 13) & 7)
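PCI_DEVID() from <linux/pci.h> packs bus and devfn exactly as the removed TLP_REQ_ID() did, so the TLP requester ID is unchanged. A hypothetical compile-time check of the arithmetic:

	#include <linux/build_bug.h>
	#include <linux/pci.h>

	/* bus 0x02, device 3, function 1: devfn = 0x19, requester ID = 0x0219 */
	static_assert(PCI_DEVID(0x02, PCI_DEVFN(3, 1)) == 0x0219);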
@@ -79,9 +78,25 @@
#define S10_TLP_FMTTYPE_CFGWR0 0x45
#define S10_TLP_FMTTYPE_CFGWR1 0x44
+#define AGLX_RP_CFG_ADDR(pcie, reg) (((pcie)->hip_base) + (reg))
+#define AGLX_RP_SECONDARY(pcie) \
+ readb(AGLX_RP_CFG_ADDR(pcie, PCI_SECONDARY_BUS))
+
+#define AGLX_BDF_REG 0x00002004
+#define AGLX_ROOT_PORT_IRQ_STATUS 0x14c
+#define AGLX_ROOT_PORT_IRQ_ENABLE 0x150
+#define CFG_AER BIT(4)
+
+#define AGLX_CFG_TARGET GENMASK(13, 12)
+#define AGLX_CFG_TARGET_TYPE0 0
+#define AGLX_CFG_TARGET_TYPE1 1
+#define AGLX_CFG_TARGET_LOCAL_2000 2
+#define AGLX_CFG_TARGET_LOCAL_3000 3
+
enum altera_pcie_version {
ALTERA_PCIE_V1 = 0,
ALTERA_PCIE_V2,
+ ALTERA_PCIE_V3,
};
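AGLX_CFG_TARGET occupies bits 13:12 of the local config offset, so FIELD_PREP() folds the target type directly into the offset. A hypothetical helper showing the arithmetic:

	/* Sketch: FIELD_PREP(GENMASK(13, 12), AGLX_CFG_TARGET_TYPE1) == 0x1000 */
	static inline u32 aglx_type1_offset(u32 where)
	{
		/* e.g. offset 0x10 becomes 0x1010 for a Type 1 config TLP */
		return where | FIELD_PREP(AGLX_CFG_TARGET, AGLX_CFG_TARGET_TYPE1);
	}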
struct altera_pcie {
@@ -104,6 +119,11 @@ struct altera_pcie_ops {
int size, u32 *value);
int (*rp_write_cfg)(struct altera_pcie *pcie, u8 busno,
int where, int size, u32 value);
+ int (*ep_read_cfg)(struct altera_pcie *pcie, u8 busno,
+ unsigned int devfn, int where, int size, u32 *value);
+ int (*ep_write_cfg)(struct altera_pcie *pcie, u8 busno,
+ unsigned int devfn, int where, int size, u32 value);
+ void (*rp_isr)(struct irq_desc *desc);
};
struct altera_pcie_data {
@@ -114,6 +134,9 @@ struct altera_pcie_data {
u32 cfgrd1;
u32 cfgwr0;
u32 cfgwr1;
+ u32 port_conf_offset;
+ u32 port_irq_status_offset;
+ u32 port_irq_enable_offset;
};
struct tlp_rp_regpair_t {
@@ -133,6 +156,28 @@ static inline u32 cra_readl(struct altera_pcie *pcie, const u32 reg)
return readl_relaxed(pcie->cra_base + reg);
}
+static inline void cra_writew(struct altera_pcie *pcie, const u32 value,
+ const u32 reg)
+{
+ writew_relaxed(value, pcie->cra_base + reg);
+}
+
+static inline u32 cra_readw(struct altera_pcie *pcie, const u32 reg)
+{
+ return readw_relaxed(pcie->cra_base + reg);
+}
+
+static inline void cra_writeb(struct altera_pcie *pcie, const u32 value,
+ const u32 reg)
+{
+ writeb_relaxed(value, pcie->cra_base + reg);
+}
+
+static inline u32 cra_readb(struct altera_pcie *pcie, const u32 reg)
+{
+ return readb_relaxed(pcie->cra_base + reg);
+}
+
static bool altera_pcie_link_up(struct altera_pcie *pcie)
{
return !!((cra_readl(pcie, RP_LTSSM) & RP_LTSSM_MASK) == LTSSM_L0);
@@ -147,11 +192,20 @@ static bool s10_altera_pcie_link_up(struct altera_pcie *pcie)
return !!(readw(addr) & PCI_EXP_LNKSTA_DLLLA);
}
+static bool aglx_altera_pcie_link_up(struct altera_pcie *pcie)
+{
+ void __iomem *addr = AGLX_RP_CFG_ADDR(pcie,
+ pcie->pcie_data->cap_offset +
+ PCI_EXP_LNKSTA);
+
+ return (readw_relaxed(addr) & PCI_EXP_LNKSTA_DLLLA);
+}
+
/*
* Altera PCIe port uses BAR0 of RC's configuration space as the translation
* from PCI bus to native BUS. Entire DDR region is mapped into PCIe space
* using these registers, so it can be reached by DMA from EP devices.
- * This BAR0 will also access to MSI vector when receiving MSI/MSIX interrupt
- * This BAR0 will also access to MSI vector when receiving MSI/MSIX interrupt
+ * This BAR0 will also access to MSI vector when receiving MSI/MSI-X interrupt
* from EP devices, eventually trigger interrupt to GIC. The BAR0 of bridge
* should be hidden during enumeration to avoid the sizing and resource
* allocation by PCIe core.
@@ -427,6 +481,103 @@ static int s10_rp_write_cfg(struct altera_pcie *pcie, u8 busno,
return PCIBIOS_SUCCESSFUL;
}
+static int aglx_rp_read_cfg(struct altera_pcie *pcie, int where,
+ int size, u32 *value)
+{
+ void __iomem *addr = AGLX_RP_CFG_ADDR(pcie, where);
+
+ switch (size) {
+ case 1:
+ *value = readb_relaxed(addr);
+ break;
+ case 2:
+ *value = readw_relaxed(addr);
+ break;
+ default:
+ *value = readl_relaxed(addr);
+ break;
+ }
+
+ /* Interrupt PIN not programmed in hardware, set to INTA. */
+ if (where == PCI_INTERRUPT_PIN && size == 1 && !(*value))
+ *value = 0x01;
+ else if (where == PCI_INTERRUPT_LINE && !(*value & 0xff00))
+ *value |= 0x0100;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int aglx_rp_write_cfg(struct altera_pcie *pcie, u8 busno,
+ int where, int size, u32 value)
+{
+ void __iomem *addr = AGLX_RP_CFG_ADDR(pcie, where);
+
+ switch (size) {
+ case 1:
+ writeb_relaxed(value, addr);
+ break;
+ case 2:
+ writew_relaxed(value, addr);
+ break;
+ default:
+ writel_relaxed(value, addr);
+ break;
+ }
+
+ /*
+ * Monitor changes to PCI_PRIMARY_BUS register on Root Port
+ * and update local copy of root bus number accordingly.
+ */
+ if (busno == pcie->root_bus_nr && where == PCI_PRIMARY_BUS)
+ pcie->root_bus_nr = value & 0xff;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int aglx_ep_write_cfg(struct altera_pcie *pcie, u8 busno,
+ unsigned int devfn, int where, int size, u32 value)
+{
+ cra_writel(pcie, ((busno << 8) | devfn), AGLX_BDF_REG);
+ if (busno > AGLX_RP_SECONDARY(pcie))
+ where |= FIELD_PREP(AGLX_CFG_TARGET, AGLX_CFG_TARGET_TYPE1);
+
+ switch (size) {
+ case 1:
+ cra_writeb(pcie, value, where);
+ break;
+ case 2:
+ cra_writew(pcie, value, where);
+ break;
+ default:
+ cra_writel(pcie, value, where);
+ break;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int aglx_ep_read_cfg(struct altera_pcie *pcie, u8 busno,
+ unsigned int devfn, int where, int size, u32 *value)
+{
+ cra_writel(pcie, ((busno << 8) | devfn), AGLX_BDF_REG);
+ if (busno > AGLX_RP_SECONDARY(pcie))
+ where |= FIELD_PREP(AGLX_CFG_TARGET, AGLX_CFG_TARGET_TYPE1);
+
+ switch (size) {
+ case 1:
+ *value = cra_readb(pcie, where);
+ break;
+ case 2:
+ *value = cra_readw(pcie, where);
+ break;
+ default:
+ *value = cra_readl(pcie, where);
+ break;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
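For illustration, a hypothetical caller reading a downstream device's Vendor ID through the helper above; the BDF latch into AGLX_BDF_REG happens inside aglx_ep_read_cfg():

	/* Hypothetical usage: 16-bit Vendor ID read from bus 1, devfn 0 */
	static u16 aglx_example_vendor_id(struct altera_pcie *pcie)
	{
		u32 val;

		aglx_ep_read_cfg(pcie, 1, PCI_DEVFN(0, 0), PCI_VENDOR_ID, 2, &val);
		return val;
	}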
static int _altera_pcie_cfg_read(struct altera_pcie *pcie, u8 busno,
unsigned int devfn, int where, int size,
u32 *value)
@@ -439,6 +590,10 @@ static int _altera_pcie_cfg_read(struct altera_pcie *pcie, u8 busno,
return pcie->pcie_data->ops->rp_read_cfg(pcie, where,
size, value);
+ if (pcie->pcie_data->ops->ep_read_cfg)
+ return pcie->pcie_data->ops->ep_read_cfg(pcie, busno, devfn,
+ where, size, value);
+
switch (size) {
case 1:
byte_en = 1 << (where & 3);
@@ -483,6 +638,10 @@ static int _altera_pcie_cfg_write(struct altera_pcie *pcie, u8 busno,
return pcie->pcie_data->ops->rp_write_cfg(pcie, busno,
where, size, value);
+ if (pcie->pcie_data->ops->ep_write_cfg)
+ return pcie->pcie_data->ops->ep_write_cfg(pcie, busno, devfn,
+ where, size, value);
+
switch (size) {
case 1:
data32 = (value & 0xff) << shift;
@@ -661,17 +820,41 @@ static void altera_pcie_isr(struct irq_desc *desc)
dev_err_ratelimited(dev, "unexpected IRQ, INT%d\n", bit);
}
}
+ chained_irq_exit(chip, desc);
+}
+
+static void aglx_isr(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct altera_pcie *pcie;
+ struct device *dev;
+ u32 status;
+ int ret;
+ chained_irq_enter(chip, desc);
+ pcie = irq_desc_get_handler_data(desc);
+ dev = &pcie->pdev->dev;
+
+ status = readl(pcie->hip_base + pcie->pcie_data->port_conf_offset +
+ pcie->pcie_data->port_irq_status_offset);
+
+ if (status & CFG_AER) {
+ writel(CFG_AER, (pcie->hip_base + pcie->pcie_data->port_conf_offset +
+ pcie->pcie_data->port_irq_status_offset));
+
+ ret = generic_handle_domain_irq(pcie->irq_domain, 0);
+ if (ret)
+ dev_err_ratelimited(dev, "unexpected IRQ %d\n", pcie->irq);
+ }
chained_irq_exit(chip, desc);
}
static int altera_pcie_init_irq_domain(struct altera_pcie *pcie)
{
struct device *dev = &pcie->pdev->dev;
- struct device_node *node = dev->of_node;
/* Setup INTx */
- pcie->irq_domain = irq_domain_add_linear(node, PCI_NUM_INTX,
+ pcie->irq_domain = irq_domain_create_linear(dev_fwnode(dev), PCI_NUM_INTX,
&intx_domain_ops, pcie);
if (!pcie->irq_domain) {
dev_err(dev, "Failed to get a INTx IRQ domain\n");
@@ -696,9 +879,9 @@ static int altera_pcie_parse_dt(struct altera_pcie *pcie)
if (IS_ERR(pcie->cra_base))
return PTR_ERR(pcie->cra_base);
- if (pcie->pcie_data->version == ALTERA_PCIE_V2) {
- pcie->hip_base =
- devm_platform_ioremap_resource_byname(pdev, "Hip");
+ if (pcie->pcie_data->version == ALTERA_PCIE_V2 ||
+ pcie->pcie_data->version == ALTERA_PCIE_V3) {
+ pcie->hip_base = devm_platform_ioremap_resource_byname(pdev, "Hip");
if (IS_ERR(pcie->hip_base))
return PTR_ERR(pcie->hip_base);
}
@@ -708,7 +891,7 @@ static int altera_pcie_parse_dt(struct altera_pcie *pcie)
if (pcie->irq < 0)
return pcie->irq;
- irq_set_chained_handler_and_data(pcie->irq, altera_pcie_isr, pcie);
+ irq_set_chained_handler_and_data(pcie->irq, pcie->pcie_data->ops->rp_isr, pcie);
return 0;
}
@@ -721,6 +904,7 @@ static const struct altera_pcie_ops altera_pcie_ops_1_0 = {
.tlp_read_pkt = tlp_read_packet,
.tlp_write_pkt = tlp_write_packet,
.get_link_status = altera_pcie_link_up,
+ .rp_isr = altera_pcie_isr,
};
static const struct altera_pcie_ops altera_pcie_ops_2_0 = {
@@ -729,6 +913,16 @@ static const struct altera_pcie_ops altera_pcie_ops_2_0 = {
.get_link_status = s10_altera_pcie_link_up,
.rp_read_cfg = s10_rp_read_cfg,
.rp_write_cfg = s10_rp_write_cfg,
+ .rp_isr = altera_pcie_isr,
+};
+
+static const struct altera_pcie_ops altera_pcie_ops_3_0 = {
+ .rp_read_cfg = aglx_rp_read_cfg,
+ .rp_write_cfg = aglx_rp_write_cfg,
+ .get_link_status = aglx_altera_pcie_link_up,
+ .ep_read_cfg = aglx_ep_read_cfg,
+ .ep_write_cfg = aglx_ep_write_cfg,
+ .rp_isr = aglx_isr,
};
static const struct altera_pcie_data altera_pcie_1_0_data = {
@@ -751,11 +945,44 @@ static const struct altera_pcie_data altera_pcie_2_0_data = {
.cfgwr1 = S10_TLP_FMTTYPE_CFGWR1,
};
+static const struct altera_pcie_data altera_pcie_3_0_f_tile_data = {
+ .ops = &altera_pcie_ops_3_0,
+ .version = ALTERA_PCIE_V3,
+ .cap_offset = 0x70,
+ .port_conf_offset = 0x14000,
+ .port_irq_status_offset = AGLX_ROOT_PORT_IRQ_STATUS,
+ .port_irq_enable_offset = AGLX_ROOT_PORT_IRQ_ENABLE,
+};
+
+static const struct altera_pcie_data altera_pcie_3_0_p_tile_data = {
+ .ops = &altera_pcie_ops_3_0,
+ .version = ALTERA_PCIE_V3,
+ .cap_offset = 0x70,
+ .port_conf_offset = 0x104000,
+ .port_irq_status_offset = AGLX_ROOT_PORT_IRQ_STATUS,
+ .port_irq_enable_offset = AGLX_ROOT_PORT_IRQ_ENABLE,
+};
+
+static const struct altera_pcie_data altera_pcie_3_0_r_tile_data = {
+ .ops = &altera_pcie_ops_3_0,
+ .version = ALTERA_PCIE_V3,
+ .cap_offset = 0x70,
+ .port_conf_offset = 0x1300,
+ .port_irq_status_offset = 0x0,
+ .port_irq_enable_offset = 0x4,
+};
+
static const struct of_device_id altera_pcie_of_match[] = {
{.compatible = "altr,pcie-root-port-1.0",
.data = &altera_pcie_1_0_data },
{.compatible = "altr,pcie-root-port-2.0",
.data = &altera_pcie_2_0_data },
+ {.compatible = "altr,pcie-root-port-3.0-f-tile",
+ .data = &altera_pcie_3_0_f_tile_data },
+ {.compatible = "altr,pcie-root-port-3.0-p-tile",
+ .data = &altera_pcie_3_0_p_tile_data },
+ {.compatible = "altr,pcie-root-port-3.0-r-tile",
+ .data = &altera_pcie_3_0_r_tile_data },
{},
};
@@ -793,11 +1020,18 @@ static int altera_pcie_probe(struct platform_device *pdev)
return ret;
}
- /* clear all interrupts */
- cra_writel(pcie, P2A_INT_STS_ALL, P2A_INT_STATUS);
- /* enable all interrupts */
- cra_writel(pcie, P2A_INT_ENA_ALL, P2A_INT_ENABLE);
- altera_pcie_host_init(pcie);
+ if (pcie->pcie_data->version == ALTERA_PCIE_V1 ||
+ pcie->pcie_data->version == ALTERA_PCIE_V2) {
+ /* clear all interrupts */
+ cra_writel(pcie, P2A_INT_STS_ALL, P2A_INT_STATUS);
+ /* enable all interrupts */
+ cra_writel(pcie, P2A_INT_ENA_ALL, P2A_INT_ENABLE);
+ altera_pcie_host_init(pcie);
+ } else if (pcie->pcie_data->version == ALTERA_PCIE_V3) {
+ writel(CFG_AER,
+ pcie->hip_base + pcie->pcie_data->port_conf_offset +
+ pcie->pcie_data->port_irq_enable_offset);
+ }
bridge->sysdata = pcie;
bridge->busnr = pcie->root_bus_nr;
@@ -806,7 +1040,7 @@ static int altera_pcie_probe(struct platform_device *pdev)
return pci_host_probe(bridge);
}
-static int altera_pcie_remove(struct platform_device *pdev)
+static void altera_pcie_remove(struct platform_device *pdev)
{
struct altera_pcie *pcie = platform_get_drvdata(pdev);
struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
@@ -814,19 +1048,18 @@ static int altera_pcie_remove(struct platform_device *pdev)
pci_stop_root_bus(bridge->bus);
pci_remove_root_bus(bridge->bus);
altera_pcie_irq_teardown(pcie);
-
- return 0;
}
static struct platform_driver altera_pcie_driver = {
- .probe = altera_pcie_probe,
- .remove = altera_pcie_remove,
+ .probe = altera_pcie_probe,
+ .remove = altera_pcie_remove,
.driver = {
- .name = "altera-pcie",
+ .name = "altera-pcie",
.of_match_table = altera_pcie_of_match,
},
};
MODULE_DEVICE_TABLE(of, altera_pcie_of_match);
module_platform_driver(altera_pcie_driver);
+MODULE_DESCRIPTION("Altera PCIe host controller driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/pcie-apple.c b/drivers/pci/controller/pcie-apple.c
index 66f37e403a09..2d92fc79f6dd 100644
--- a/drivers/pci/controller/pcie-apple.c
+++ b/drivers/pci/controller/pcie-apple.c
@@ -18,18 +18,22 @@
* Author: Marc Zyngier <maz@kernel.org>
*/
+#include <linux/bitfield.h>
#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
#include <linux/iopoll.h>
#include <linux/irqchip/chained_irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqdomain.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/msi.h>
-#include <linux/notifier.h>
#include <linux/of_irq.h>
#include <linux/pci-ecam.h>
+#include "pci-host-common.h"
+
+/* T8103 (original M1) and related SoCs */
#define CORE_RC_PHYIF_CTL 0x00024
#define CORE_RC_PHYIF_CTL_RUN BIT(0)
#define CORE_RC_PHYIF_STAT 0x00028
@@ -40,14 +44,18 @@
#define CORE_RC_STAT_READY BIT(0)
#define CORE_FABRIC_STAT 0x04000
#define CORE_FABRIC_STAT_MASK 0x001F001F
-#define CORE_LANE_CFG(port) (0x84000 + 0x4000 * (port))
-#define CORE_LANE_CFG_REFCLK0REQ BIT(0)
-#define CORE_LANE_CFG_REFCLK1REQ BIT(1)
-#define CORE_LANE_CFG_REFCLK0ACK BIT(2)
-#define CORE_LANE_CFG_REFCLK1ACK BIT(3)
-#define CORE_LANE_CFG_REFCLKEN (BIT(9) | BIT(10))
-#define CORE_LANE_CTL(port) (0x84004 + 0x4000 * (port))
-#define CORE_LANE_CTL_CFGACC BIT(15)
+
+#define CORE_PHY_DEFAULT_BASE(port) (0x84000 + 0x4000 * (port))
+
+#define PHY_LANE_CFG 0x00000
+#define PHY_LANE_CFG_REFCLK0REQ BIT(0)
+#define PHY_LANE_CFG_REFCLK1REQ BIT(1)
+#define PHY_LANE_CFG_REFCLK0ACK BIT(2)
+#define PHY_LANE_CFG_REFCLK1ACK BIT(3)
+#define PHY_LANE_CFG_REFCLKEN (BIT(9) | BIT(10))
+#define PHY_LANE_CFG_REFCLKCGEN (BIT(30) | BIT(31))
+#define PHY_LANE_CTL 0x00004
+#define PHY_LANE_CTL_CFGACC BIT(15)
#define PORT_LTSSMCTL 0x00080
#define PORT_LTSSMCTL_START BIT(0)
@@ -101,7 +109,7 @@
#define PORT_REFCLK_CGDIS BIT(8)
#define PORT_PERST 0x00814
#define PORT_PERST_OFF BIT(0)
-#define PORT_RID2SID(i16) (0x00828 + 4 * (i16))
+#define PORT_RID2SID 0x00828
#define PORT_RID2SID_VALID BIT(31)
#define PORT_RID2SID_SID_SHIFT 16
#define PORT_RID2SID_BUS_SHIFT 8
@@ -119,7 +127,15 @@
#define PORT_TUNSTAT_PERST_ACK_PEND BIT(1)
#define PORT_PREFMEM_ENABLE 0x00994
-#define MAX_RID2SID 64
+/* T602x (M2-pro and co) */
+#define PORT_T602X_MSIADDR 0x016c
+#define PORT_T602X_MSIADDR_HI 0x0170
+#define PORT_T602X_PERST 0x082c
+#define PORT_T602X_RID2SID 0x3000
+#define PORT_T602X_MSIMAP 0x3800
+
+#define PORT_MSIMAP_ENABLE BIT(31)
+#define PORT_MSIMAP_TARGET GENMASK(7, 0)
/*
* The doorbell address is set to 0xfffff000, which by convention
@@ -130,11 +146,45 @@
*/
#define DOORBELL_ADDR CONFIG_PCIE_APPLE_MSI_DOORBELL_ADDR
+struct hw_info {
+ u32 phy_lane_ctl;
+ u32 port_msiaddr;
+ u32 port_msiaddr_hi;
+ u32 port_refclk;
+ u32 port_perst;
+ u32 port_rid2sid;
+ u32 port_msimap;
+ u32 max_rid2sid;
+};
+
+static const struct hw_info t8103_hw = {
+ .phy_lane_ctl = PHY_LANE_CTL,
+ .port_msiaddr = PORT_MSIADDR,
+ .port_msiaddr_hi = 0,
+ .port_refclk = PORT_REFCLK,
+ .port_perst = PORT_PERST,
+ .port_rid2sid = PORT_RID2SID,
+ .port_msimap = 0,
+ .max_rid2sid = 64,
+};
+
+static const struct hw_info t602x_hw = {
+ .phy_lane_ctl = 0,
+ .port_msiaddr = PORT_T602X_MSIADDR,
+ .port_msiaddr_hi = PORT_T602X_MSIADDR_HI,
+ .port_refclk = 0,
+ .port_perst = PORT_T602X_PERST,
+ .port_rid2sid = PORT_T602X_RID2SID,
+ .port_msimap = PORT_T602X_MSIMAP,
+ /* 16 on t602x, guess for autodetect on future HW */
+ .max_rid2sid = 512,
+};
+
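The convention in these tables: a zero offset means the register does not exist on that SoC generation, and each user guards the access accordingly. A hedged sketch of the idiom, mirroring the refclk clock-gating change further down:

	/* Sketch of the zero-offset-means-absent convention in hw_info */
	static void example_refclk_ungate(struct apple_pcie *pcie,
					  struct apple_pcie_port *port)
	{
		if (pcie->hw->port_refclk)	/* T8103: port-side gating */
			rmw_clear(PORT_REFCLK_CGDIS,
				  port->base + pcie->hw->port_refclk);
		else				/* T602x: PHY-side gating */
			rmw_set(PHY_LANE_CFG_REFCLKCGEN,
				port->phy + PHY_LANE_CFG);
	}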
struct apple_pcie {
struct mutex lock;
struct device *dev;
void __iomem *base;
- struct irq_domain *domain;
+ const struct hw_info *hw;
unsigned long *bitmap;
struct list_head ports;
struct completion event;
@@ -143,12 +193,14 @@ struct apple_pcie {
};
struct apple_pcie_port {
+ raw_spinlock_t lock;
struct apple_pcie *pcie;
struct device_node *np;
void __iomem *base;
+ void __iomem *phy;
struct irq_domain *domain;
struct list_head entry;
- DECLARE_BITMAP(sid_map, MAX_RID2SID);
+ unsigned long *sid_map;
int sid_map_sz;
int idx;
};
@@ -163,27 +215,6 @@ static void rmw_clear(u32 clr, void __iomem *addr)
writel_relaxed(readl_relaxed(addr) & ~clr, addr);
}
-static void apple_msi_top_irq_mask(struct irq_data *d)
-{
- pci_msi_mask_irq(d);
- irq_chip_mask_parent(d);
-}
-
-static void apple_msi_top_irq_unmask(struct irq_data *d)
-{
- pci_msi_unmask_irq(d);
- irq_chip_unmask_parent(d);
-}
-
-static struct irq_chip apple_msi_top_chip = {
- .name = "PCIe MSI",
- .irq_mask = apple_msi_top_irq_mask,
- .irq_unmask = apple_msi_top_irq_unmask,
- .irq_eoi = irq_chip_eoi_parent,
- .irq_set_affinity = irq_chip_set_affinity_parent,
- .irq_set_type = irq_chip_set_type_parent,
-};
-
static void apple_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
{
msg->address_hi = upper_32_bits(DOORBELL_ADDR);
@@ -227,8 +258,7 @@ static int apple_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
for (i = 0; i < nr_irqs; i++) {
irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
- &apple_msi_bottom_chip,
- domain->host_data);
+ &apple_msi_bottom_chip, pcie);
}
return 0;
@@ -252,24 +282,20 @@ static const struct irq_domain_ops apple_msi_domain_ops = {
.free = apple_msi_domain_free,
};
-static struct msi_domain_info apple_msi_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX),
- .chip = &apple_msi_top_chip,
-};
-
static void apple_port_irq_mask(struct irq_data *data)
{
struct apple_pcie_port *port = irq_data_get_irq_chip_data(data);
- writel_relaxed(BIT(data->hwirq), port->base + PORT_INTMSKSET);
+ guard(raw_spinlock_irqsave)(&port->lock);
+ rmw_set(BIT(data->hwirq), port->base + PORT_INTMSK);
}
static void apple_port_irq_unmask(struct irq_data *data)
{
struct apple_pcie_port *port = irq_data_get_irq_chip_data(data);
- writel_relaxed(BIT(data->hwirq), port->base + PORT_INTMSKCLR);
+ guard(raw_spinlock_irqsave)(&port->lock);
+ rmw_clear(BIT(data->hwirq), port->base + PORT_INTMSK);
}
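PORT_INTMSK is now updated by read-modify-write rather than via dedicated set/clear registers, hence the new per-port lock. guard() from <linux/cleanup.h> drops the lock automatically on scope exit; a minimal sketch with a hypothetical helper:

	/* Hypothetical helper: mask two port interrupts under the RMW lock */
	static void example_mask_two(struct apple_pcie_port *port, u32 a, u32 b)
	{
		guard(raw_spinlock_irqsave)(&port->lock);

		rmw_set(BIT(a) | BIT(b), port->base + PORT_INTMSK);
	}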
static bool hwirq_is_intx(unsigned int hwirq)
@@ -373,7 +399,9 @@ static void apple_port_irq_handler(struct irq_desc *desc)
static int apple_pcie_port_setup_irq(struct apple_pcie_port *port)
{
struct fwnode_handle *fwnode = &port->np->fwnode;
+ struct apple_pcie *pcie = port->pcie;
unsigned int irq;
+ u32 val = 0;
/* FIXME: consider moving each interrupt under each port */
irq = irq_of_parse_and_map(to_of_node(dev_fwnode(port->pcie->dev)),
@@ -388,20 +416,31 @@ static int apple_pcie_port_setup_irq(struct apple_pcie_port *port)
return -ENOMEM;
/* Disable all interrupts */
- writel_relaxed(~0, port->base + PORT_INTMSKSET);
+ writel_relaxed(~0, port->base + PORT_INTMSK);
writel_relaxed(~0, port->base + PORT_INTSTAT);
+ writel_relaxed(~0, port->base + PORT_LINKCMDSTS);
irq_set_chained_handler_and_data(irq, apple_port_irq_handler, port);
/* Configure MSI base address */
BUILD_BUG_ON(upper_32_bits(DOORBELL_ADDR));
- writel_relaxed(lower_32_bits(DOORBELL_ADDR), port->base + PORT_MSIADDR);
+ writel_relaxed(lower_32_bits(DOORBELL_ADDR),
+ port->base + pcie->hw->port_msiaddr);
+ if (pcie->hw->port_msiaddr_hi)
+ writel_relaxed(0, port->base + pcie->hw->port_msiaddr_hi);
/* Enable MSIs, shared between all ports */
- writel_relaxed(0, port->base + PORT_MSIBASE);
- writel_relaxed((ilog2(port->pcie->nvecs) << PORT_MSICFG_L2MSINUM_SHIFT) |
- PORT_MSICFG_EN, port->base + PORT_MSICFG);
+ if (pcie->hw->port_msimap) {
+ for (int i = 0; i < pcie->nvecs; i++)
+ writel_relaxed(FIELD_PREP(PORT_MSIMAP_TARGET, i) |
+ PORT_MSIMAP_ENABLE,
+ port->base + pcie->hw->port_msimap + 4 * i);
+ } else {
+ writel_relaxed(0, port->base + PORT_MSIBASE);
+ val = ilog2(pcie->nvecs) << PORT_MSICFG_L2MSINUM_SHIFT;
+ }
+ writel_relaxed(val | PORT_MSICFG_EN, port->base + PORT_MSICFG);
return 0;
}
@@ -468,43 +507,47 @@ static int apple_pcie_setup_refclk(struct apple_pcie *pcie,
u32 stat;
int res;
- res = readl_relaxed_poll_timeout(pcie->base + CORE_RC_PHYIF_STAT, stat,
- stat & CORE_RC_PHYIF_STAT_REFCLK,
- 100, 50000);
- if (res < 0)
- return res;
+ if (pcie->hw->phy_lane_ctl)
+ rmw_set(PHY_LANE_CTL_CFGACC, port->phy + pcie->hw->phy_lane_ctl);
- rmw_set(CORE_LANE_CTL_CFGACC, pcie->base + CORE_LANE_CTL(port->idx));
- rmw_set(CORE_LANE_CFG_REFCLK0REQ, pcie->base + CORE_LANE_CFG(port->idx));
+ rmw_set(PHY_LANE_CFG_REFCLK0REQ, port->phy + PHY_LANE_CFG);
- res = readl_relaxed_poll_timeout(pcie->base + CORE_LANE_CFG(port->idx),
- stat, stat & CORE_LANE_CFG_REFCLK0ACK,
+ res = readl_relaxed_poll_timeout(port->phy + PHY_LANE_CFG,
+ stat, stat & PHY_LANE_CFG_REFCLK0ACK,
100, 50000);
if (res < 0)
return res;
- rmw_set(CORE_LANE_CFG_REFCLK1REQ, pcie->base + CORE_LANE_CFG(port->idx));
- res = readl_relaxed_poll_timeout(pcie->base + CORE_LANE_CFG(port->idx),
- stat, stat & CORE_LANE_CFG_REFCLK1ACK,
+ rmw_set(PHY_LANE_CFG_REFCLK1REQ, port->phy + PHY_LANE_CFG);
+ res = readl_relaxed_poll_timeout(port->phy + PHY_LANE_CFG,
+ stat, stat & PHY_LANE_CFG_REFCLK1ACK,
100, 50000);
if (res < 0)
return res;
- rmw_clear(CORE_LANE_CTL_CFGACC, pcie->base + CORE_LANE_CTL(port->idx));
+ if (pcie->hw->phy_lane_ctl)
+ rmw_clear(PHY_LANE_CTL_CFGACC, port->phy + pcie->hw->phy_lane_ctl);
+
+ rmw_set(PHY_LANE_CFG_REFCLKEN, port->phy + PHY_LANE_CFG);
- rmw_set(CORE_LANE_CFG_REFCLKEN, pcie->base + CORE_LANE_CFG(port->idx));
- rmw_set(PORT_REFCLK_EN, port->base + PORT_REFCLK);
+ if (pcie->hw->port_refclk)
+ rmw_set(PORT_REFCLK_EN, port->base + pcie->hw->port_refclk);
return 0;
}
+static void __iomem *port_rid2sid_addr(struct apple_pcie_port *port, int idx)
+{
+ return port->base + port->pcie->hw->port_rid2sid + 4 * idx;
+}
+
static u32 apple_pcie_rid2sid_write(struct apple_pcie_port *port,
int idx, u32 val)
{
- writel_relaxed(val, port->base + PORT_RID2SID(idx));
+ writel_relaxed(val, port_rid2sid_addr(port, idx));
/* Read back to ensure completion of the write */
- return readl_relaxed(port->base + PORT_RID2SID(idx));
+ return readl_relaxed(port_rid2sid_addr(port, idx));
}
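Assuming the PORT_RID2SID_* field layout defined earlier (valid bit 31, SID at bit 16, RID in the low 16 bits), an entry could be composed as follows; this helper is purely illustrative:

	/* Hypothetical composer for a RID2SID entry, layout assumed from above */
	static u32 example_rid2sid_entry(u32 sid, u8 bus, u8 devfn)
	{
		return PORT_RID2SID_VALID | (sid << PORT_RID2SID_SID_SHIFT) |
		       PCI_DEVID(bus, devfn);
	}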
static int apple_pcie_setup_port(struct apple_pcie *pcie,
@@ -513,6 +556,8 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie,
struct platform_device *platform = to_platform_device(pcie->dev);
struct apple_pcie_port *port;
struct gpio_desc *reset;
+ struct resource *res;
+ char name[16];
u32 stat, idx;
int ret, i;
@@ -525,6 +570,10 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie,
if (!port)
return -ENOMEM;
+ port->sid_map = devm_bitmap_zalloc(pcie->dev, pcie->hw->max_rid2sid, GFP_KERNEL);
+ if (!port->sid_map)
+ return -ENOMEM;
+
ret = of_property_read_u32_index(np, "reg", 0, &idx);
if (ret)
return ret;
@@ -534,14 +583,28 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie,
port->pcie = pcie;
port->np = np;
- port->base = devm_platform_ioremap_resource(platform, port->idx + 2);
+ raw_spin_lock_init(&port->lock);
+
+ snprintf(name, sizeof(name), "port%d", port->idx);
+ res = platform_get_resource_byname(platform, IORESOURCE_MEM, name);
+ if (!res)
+ res = platform_get_resource(platform, IORESOURCE_MEM, port->idx + 2);
+
+ port->base = devm_ioremap_resource(&platform->dev, res);
if (IS_ERR(port->base))
return PTR_ERR(port->base);
+ snprintf(name, sizeof(name), "phy%d", port->idx);
+ res = platform_get_resource_byname(platform, IORESOURCE_MEM, name);
+ if (res)
+ port->phy = devm_ioremap_resource(&platform->dev, res);
+ else
+ port->phy = pcie->base + CORE_PHY_DEFAULT_BASE(port->idx);
+
rmw_set(PORT_APPCLK_EN, port->base + PORT_APPCLK);
/* Assert PERST# before setting up the clock */
- gpiod_set_value(reset, 1);
+ gpiod_set_value_cansleep(reset, 1);
ret = apple_pcie_setup_refclk(pcie, port);
if (ret < 0)
@@ -551,8 +614,8 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie,
usleep_range(100, 200);
/* Deassert PERST# */
- rmw_set(PORT_PERST_OFF, port->base + PORT_PERST);
- gpiod_set_value(reset, 0);
+ rmw_set(PORT_PERST_OFF, port->base + pcie->hw->port_perst);
+ gpiod_set_value_cansleep(reset, 0);
/* Wait for 100ms after PERST# deassertion (PCIe r5.0, 6.6.1) */
msleep(100);
@@ -564,7 +627,11 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie,
return ret;
}
- rmw_clear(PORT_REFCLK_CGDIS, port->base + PORT_REFCLK);
+ if (pcie->hw->port_refclk)
+ rmw_clear(PORT_REFCLK_CGDIS, port->base + pcie->hw->port_refclk);
+ else
+ rmw_set(PHY_LANE_CFG_REFCLKCGEN, port->phy + PHY_LANE_CFG);
+
rmw_clear(PORT_APPCLK_CGDIS, port->base + PORT_APPCLK);
ret = apple_pcie_port_setup_irq(port);
@@ -572,7 +639,7 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie,
return ret;
/* Reset all RID/SID mappings, and check for RAZ/WI registers */
- for (i = 0; i < MAX_RID2SID; i++) {
+ for (i = 0; i < pcie->hw->max_rid2sid; i++) {
if (apple_pcie_rid2sid_write(port, i, 0xbad1d) != 0xbad1d)
break;
apple_pcie_rid2sid_write(port, i, 0);
@@ -585,6 +652,9 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie,
list_add_tail(&port->entry, &pcie->ports);
init_completion(&pcie->event);
+ /* In the success path, we keep a reference to np around */
+ of_node_get(np);
+
ret = apple_pcie_port_register_irqs(port);
WARN_ON(ret);
@@ -596,11 +666,28 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie,
return 0;
}
+static const struct msi_parent_ops apple_msi_parent_ops = {
+ .supported_flags = (MSI_GENERIC_FLAGS_MASK |
+ MSI_FLAG_PCI_MSIX |
+ MSI_FLAG_MULTI_PCI_MSI),
+ .required_flags = (MSI_FLAG_USE_DEF_DOM_OPS |
+ MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_PCI_MSI_MASK_PARENT),
+ .chip_flags = MSI_CHIP_FLAG_SET_EOI,
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
+};
+
static int apple_msi_init(struct apple_pcie *pcie)
{
struct fwnode_handle *fwnode = dev_fwnode(pcie->dev);
+ struct irq_domain_info info = {
+ .fwnode = fwnode,
+ .ops = &apple_msi_domain_ops,
+ .size = pcie->nvecs,
+ .host_data = pcie,
+ };
struct of_phandle_args args = {};
- struct irq_domain *parent;
int ret;
ret = of_parse_phandle_with_args(to_of_node(fwnode), "msi-ranges",
@@ -620,38 +707,35 @@ static int apple_msi_init(struct apple_pcie *pcie)
if (!pcie->bitmap)
return -ENOMEM;
- parent = irq_find_matching_fwspec(&pcie->fwspec, DOMAIN_BUS_WIRED);
- if (!parent) {
+ info.parent = irq_find_matching_fwspec(&pcie->fwspec, DOMAIN_BUS_WIRED);
+ if (!info.parent) {
dev_err(pcie->dev, "failed to find parent domain\n");
return -ENXIO;
}
- parent = irq_domain_create_hierarchy(parent, 0, pcie->nvecs, fwnode,
- &apple_msi_domain_ops, pcie);
- if (!parent) {
+ if (!msi_create_parent_irq_domain(&info, &apple_msi_parent_ops)) {
dev_err(pcie->dev, "failed to create IRQ domain\n");
return -ENOMEM;
}
- irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS);
-
- pcie->domain = pci_msi_create_irq_domain(fwnode, &apple_msi_info,
- parent);
- if (!pcie->domain) {
- dev_err(pcie->dev, "failed to create MSI domain\n");
- irq_domain_remove(parent);
- return -ENOMEM;
- }
-
return 0;
}
+static struct apple_pcie *apple_pcie_lookup(struct device *dev)
+{
+ return pci_host_bridge_priv(dev_get_drvdata(dev));
+}
+
static struct apple_pcie_port *apple_pcie_get_port(struct pci_dev *pdev)
{
struct pci_config_window *cfg = pdev->sysdata;
- struct apple_pcie *pcie = cfg->priv;
+ struct apple_pcie *pcie;
struct pci_dev *port_pdev;
struct apple_pcie_port *port;
+ pcie = apple_pcie_lookup(cfg->parent);
+ if (WARN_ON(!pcie))
+ return NULL;
+
/* Find the root port this device is on */
port_pdev = pcie_find_root_port(pdev);
@@ -667,12 +751,16 @@ static struct apple_pcie_port *apple_pcie_get_port(struct pci_dev *pdev)
return NULL;
}
-static int apple_pcie_add_device(struct apple_pcie_port *port,
- struct pci_dev *pdev)
+static int apple_pcie_enable_device(struct pci_host_bridge *bridge, struct pci_dev *pdev)
{
- u32 sid, rid = PCI_DEVID(pdev->bus->number, pdev->devfn);
+ u32 sid, rid = pci_dev_id(pdev);
+ struct apple_pcie_port *port;
int idx, err;
+ port = apple_pcie_get_port(pdev);
+ if (!port)
+ return 0;
+
dev_dbg(&pdev->dev, "added to bus %s, index %d\n",
pci_name(pdev->bus->self), port->idx);
@@ -698,18 +786,22 @@ static int apple_pcie_add_device(struct apple_pcie_port *port,
return idx >= 0 ? 0 : -ENOSPC;
}
-static void apple_pcie_release_device(struct apple_pcie_port *port,
- struct pci_dev *pdev)
+static void apple_pcie_disable_device(struct pci_host_bridge *bridge, struct pci_dev *pdev)
{
- u32 rid = PCI_DEVID(pdev->bus->number, pdev->devfn);
+ struct apple_pcie_port *port;
+ u32 rid = pci_dev_id(pdev);
int idx;
+ port = apple_pcie_get_port(pdev);
+ if (!port)
+ return;
+
mutex_lock(&port->pcie->lock);
for_each_set_bit(idx, port->sid_map, port->sid_map_sz) {
u32 val;
- val = readl_relaxed(port->base + PORT_RID2SID(idx));
+ val = readl_relaxed(port_rid2sid_addr(port, idx));
if ((val & 0xffff) == rid) {
apple_pcie_rid2sid_write(port, idx, 0);
bitmap_release_region(port->sid_map, idx, 0);
@@ -721,106 +813,71 @@ static void apple_pcie_release_device(struct apple_pcie_port *port,
mutex_unlock(&port->pcie->lock);
}
-static int apple_pcie_bus_notifier(struct notifier_block *nb,
- unsigned long action,
- void *data)
+static int apple_pcie_init(struct pci_config_window *cfg)
{
- struct device *dev = data;
- struct pci_dev *pdev = to_pci_dev(dev);
- struct apple_pcie_port *port;
- int err;
+ struct device *dev = cfg->parent;
+ struct apple_pcie *pcie;
+ int ret;
- /*
- * This is a bit ugly. We assume that if we get notified for
- * any PCI device, we must be in charge of it, and that there
- * is no other PCI controller in the whole system. It probably
- * holds for now, but who knows for how long?
- */
- port = apple_pcie_get_port(pdev);
- if (!port)
- return NOTIFY_DONE;
+ pcie = apple_pcie_lookup(dev);
+ if (WARN_ON(!pcie))
+ return -ENOENT;
- switch (action) {
- case BUS_NOTIFY_ADD_DEVICE:
- err = apple_pcie_add_device(port, pdev);
- if (err)
- return notifier_from_errno(err);
- break;
- case BUS_NOTIFY_DEL_DEVICE:
- apple_pcie_release_device(port, pdev);
- break;
- default:
- return NOTIFY_DONE;
+ for_each_available_child_of_node_scoped(dev->of_node, of_port) {
+ ret = apple_pcie_setup_port(pcie, of_port);
+ if (ret) {
+ dev_err(dev, "Port %pOF setup fail: %d\n", of_port, ret);
+ return ret;
+ }
}
- return NOTIFY_OK;
+ return 0;
}
-static struct notifier_block apple_pcie_nb = {
- .notifier_call = apple_pcie_bus_notifier,
+static const struct pci_ecam_ops apple_pcie_cfg_ecam_ops = {
+ .init = apple_pcie_init,
+ .enable_device = apple_pcie_enable_device,
+ .disable_device = apple_pcie_disable_device,
+ .pci_ops = {
+ .map_bus = pci_ecam_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+ }
};
-static int apple_pcie_init(struct pci_config_window *cfg)
+static int apple_pcie_probe(struct platform_device *pdev)
{
- struct device *dev = cfg->parent;
- struct platform_device *platform = to_platform_device(dev);
- struct device_node *of_port;
+ struct device *dev = &pdev->dev;
+ struct pci_host_bridge *bridge;
struct apple_pcie *pcie;
int ret;
- pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
- if (!pcie)
+ bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
+ if (!bridge)
return -ENOMEM;
+ pcie = pci_host_bridge_priv(bridge);
pcie->dev = dev;
-
- mutex_init(&pcie->lock);
-
- pcie->base = devm_platform_ioremap_resource(platform, 1);
+ pcie->hw = of_device_get_match_data(dev);
+ if (!pcie->hw)
+ return -ENODEV;
+ pcie->base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(pcie->base))
return PTR_ERR(pcie->base);
- cfg->priv = pcie;
+ mutex_init(&pcie->lock);
INIT_LIST_HEAD(&pcie->ports);
- for_each_child_of_node(dev->of_node, of_port) {
- ret = apple_pcie_setup_port(pcie, of_port);
- if (ret) {
- dev_err(pcie->dev, "Port %pOF setup fail: %d\n", of_port, ret);
- of_node_put(of_port);
- return ret;
- }
- }
-
- return apple_msi_init(pcie);
-}
-
-static int apple_pcie_probe(struct platform_device *pdev)
-{
- int ret;
-
- ret = bus_register_notifier(&pci_bus_type, &apple_pcie_nb);
+ ret = apple_msi_init(pcie);
if (ret)
return ret;
- ret = pci_host_common_probe(pdev);
- if (ret)
- bus_unregister_notifier(&pci_bus_type, &apple_pcie_nb);
-
- return ret;
+ return pci_host_common_init(pdev, bridge, &apple_pcie_cfg_ecam_ops);
}
-static const struct pci_ecam_ops apple_pcie_cfg_ecam_ops = {
- .init = apple_pcie_init,
- .pci_ops = {
- .map_bus = pci_ecam_map_bus,
- .read = pci_generic_config_read,
- .write = pci_generic_config_write,
- }
-};
-
static const struct of_device_id apple_pcie_of_match[] = {
- { .compatible = "apple,pcie", .data = &apple_pcie_cfg_ecam_ops },
+ { .compatible = "apple,t6020-pcie", .data = &t602x_hw },
+ { .compatible = "apple,pcie", .data = &t8103_hw },
{ }
};
MODULE_DEVICE_TABLE(of, apple_pcie_of_match);
@@ -835,4 +892,5 @@ static struct platform_driver apple_pcie_driver = {
};
module_platform_driver(apple_pcie_driver);
+MODULE_DESCRIPTION("Apple PCIe host bridge driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c
index edf283e2b5dd..062f55690012 100644
--- a/drivers/pci/controller/pcie-brcmstb.c
+++ b/drivers/pci/controller/pcie-brcmstb.c
@@ -12,16 +12,20 @@
#include <linux/iopoll.h>
#include <linux/ioport.h>
#include <linux/irqchip/chained_irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqdomain.h>
+#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/msi.h>
+#include <linux/notifier.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
+#include <linux/panic_notifier.h>
#include <linux/pci.h>
#include <linux/pci-ecam.h>
#include <linux/printk.h>
@@ -29,7 +33,9 @@
#include <linux/reset.h>
#include <linux/sizes.h>
#include <linux/slab.h>
+#include <linux/spinlock.h>
#include <linux/string.h>
+#include <linux/string_choices.h>
#include <linux/types.h>
#include "../pci.h"
@@ -40,18 +46,28 @@
/* Broadcom STB PCIe Register Offsets */
#define PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1 0x0188
#define PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1_ENDIAN_MODE_BAR2_MASK 0xc
-#define PCIE_RC_CFG_VENDOR_SPCIFIC_REG1_LITTLE_ENDIAN 0x0
+#define PCIE_RC_CFG_VENDOR_SPECIFIC_REG1_LITTLE_ENDIAN 0x0
#define PCIE_RC_CFG_PRIV1_ID_VAL3 0x043c
#define PCIE_RC_CFG_PRIV1_ID_VAL3_CLASS_CODE_MASK 0xffffff
#define PCIE_RC_CFG_PRIV1_LINK_CAPABILITY 0x04dc
-#define PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_ASPM_SUPPORT_MASK 0xc00
+#define PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_MAX_LINK_WIDTH_MASK 0x1f0
+
+#define PCIE_RC_CFG_PRIV1_ROOT_CAP 0x4f8
+#define PCIE_RC_CFG_PRIV1_ROOT_CAP_L1SS_MODE_MASK 0xf8
#define PCIE_RC_DL_MDIO_ADDR 0x1100
#define PCIE_RC_DL_MDIO_WR_DATA 0x1104
#define PCIE_RC_DL_MDIO_RD_DATA 0x1108
+#define PCIE_RC_PL_REG_PHY_CTL_1 0x1804
+#define PCIE_RC_PL_REG_PHY_CTL_1_REG_P2_POWERDOWN_ENA_NOSYNC_MASK 0x8
+
+#define PCIE_RC_PL_PHY_CTL_15 0x184c
+#define PCIE_RC_PL_PHY_CTL_15_DIS_PLL_PD_MASK 0x400000
+#define PCIE_RC_PL_PHY_CTL_15_PM_CLK_PERIOD_MASK 0xff
+
#define PCIE_MISC_MISC_CTRL 0x4008
#define PCIE_MISC_MISC_CTRL_PCIE_RCB_64B_MODE_MASK 0x80
#define PCIE_MISC_MISC_CTRL_PCIE_RCB_MPS_MODE_MASK 0x400
@@ -72,15 +88,19 @@
#define PCIE_MEM_WIN0_HI(win) \
PCIE_MISC_CPU_2_PCIE_MEM_WIN0_HI + ((win) * 8)
+/*
+ * NOTE: You may see the term "BAR" in a number of register names used by
+ * this driver. The term is an artifact of when the HW core was an
+ * endpoint device (EP). Now it is a root complex (RC) and anywhere a
+ * register has the term "BAR" it is related to an inbound window.
+ */
+
+#define PCIE_BRCM_MAX_INBOUND_WINS 16
#define PCIE_MISC_RC_BAR1_CONFIG_LO 0x402c
#define PCIE_MISC_RC_BAR1_CONFIG_LO_SIZE_MASK 0x1f
-#define PCIE_MISC_RC_BAR2_CONFIG_LO 0x4034
-#define PCIE_MISC_RC_BAR2_CONFIG_LO_SIZE_MASK 0x1f
-#define PCIE_MISC_RC_BAR2_CONFIG_HI 0x4038
+#define PCIE_MISC_RC_BAR4_CONFIG_LO 0x40d4
-#define PCIE_MISC_RC_BAR3_CONFIG_LO 0x403c
-#define PCIE_MISC_RC_BAR3_CONFIG_LO_SIZE_MASK 0x1f
#define PCIE_MISC_MSI_BAR_CONFIG_LO 0x4044
#define PCIE_MISC_MSI_BAR_CONFIG_HI 0x4048
@@ -119,25 +139,60 @@
#define PCIE_MEM_WIN0_LIMIT_HI(win) \
PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LIMIT_HI + ((win) * 8)
-#define PCIE_MISC_HARD_PCIE_HARD_DEBUG 0x4204
#define PCIE_MISC_HARD_PCIE_HARD_DEBUG_CLKREQ_DEBUG_ENABLE_MASK 0x2
+#define PCIE_MISC_HARD_PCIE_HARD_DEBUG_L1SS_ENABLE_MASK 0x200000
#define PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK 0x08000000
#define PCIE_BMIPS_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK 0x00800000
+#define PCIE_CLKREQ_MASK \
+ (PCIE_MISC_HARD_PCIE_HARD_DEBUG_CLKREQ_DEBUG_ENABLE_MASK | \
+ PCIE_MISC_HARD_PCIE_HARD_DEBUG_L1SS_ENABLE_MASK)
+#define PCIE_MISC_UBUS_BAR1_CONFIG_REMAP 0x40ac
+#define PCIE_MISC_UBUS_BAR1_CONFIG_REMAP_ACCESS_EN_MASK BIT(0)
+#define PCIE_MISC_UBUS_BAR4_CONFIG_REMAP 0x410c
-#define PCIE_INTR2_CPU_BASE 0x4300
#define PCIE_MSI_INTR2_BASE 0x4500
-/* Offsets from PCIE_INTR2_CPU_BASE and PCIE_MSI_INTR2_BASE */
+
+/* Offsets from INTR2_CPU and MSI_INTR2 BASE offsets */
#define MSI_INT_STATUS 0x0
#define MSI_INT_CLR 0x8
#define MSI_INT_MASK_SET 0x10
#define MSI_INT_MASK_CLR 0x14
-#define PCIE_EXT_CFG_DATA 0x8000
-#define PCIE_EXT_CFG_INDEX 0x9000
+/* Error report registers */
+#define PCIE_OUTB_ERR_TREAT 0x6000
+#define PCIE_OUTB_ERR_TREAT_CONFIG 0x1
+#define PCIE_OUTB_ERR_TREAT_MEM 0x2
+#define PCIE_OUTB_ERR_VALID 0x6004
+#define PCIE_OUTB_ERR_CLEAR 0x6008
+#define PCIE_OUTB_ERR_ACC_INFO 0x600c
+#define PCIE_OUTB_ERR_ACC_INFO_CFG_ERR BIT(0)
+#define PCIE_OUTB_ERR_ACC_INFO_MEM_ERR BIT(1)
+#define PCIE_OUTB_ERR_ACC_INFO_TYPE_64 BIT(2)
+#define PCIE_OUTB_ERR_ACC_INFO_DIR_WRITE BIT(4)
+#define PCIE_OUTB_ERR_ACC_INFO_BYTE_LANES 0xff00
+#define PCIE_OUTB_ERR_ACC_ADDR 0x6010
+#define PCIE_OUTB_ERR_ACC_ADDR_BUS 0xff00000
+#define PCIE_OUTB_ERR_ACC_ADDR_DEV 0xf8000
+#define PCIE_OUTB_ERR_ACC_ADDR_FUNC 0x7000
+#define PCIE_OUTB_ERR_ACC_ADDR_REG 0xfff
+#define PCIE_OUTB_ERR_CFG_CAUSE 0x6014
+#define PCIE_OUTB_ERR_CFG_CAUSE_TIMEOUT BIT(6)
+#define PCIE_OUTB_ERR_CFG_CAUSE_ABORT BIT(5)
+#define PCIE_OUTB_ERR_CFG_CAUSE_UNSUPP_REQ BIT(4)
+#define PCIE_OUTB_ERR_CFG_CAUSE_ACC_TIMEOUT BIT(2)
+#define PCIE_OUTB_ERR_CFG_CAUSE_ACC_DISABLED BIT(1)
+#define PCIE_OUTB_ERR_CFG_CAUSE_ACC_64BIT BIT(0)
+#define PCIE_OUTB_ERR_MEM_ADDR_LO 0x6018
+#define PCIE_OUTB_ERR_MEM_ADDR_HI 0x601c
+#define PCIE_OUTB_ERR_MEM_CAUSE 0x6020
+#define PCIE_OUTB_ERR_MEM_CAUSE_TIMEOUT BIT(6)
+#define PCIE_OUTB_ERR_MEM_CAUSE_ABORT BIT(5)
+#define PCIE_OUTB_ERR_MEM_CAUSE_UNSUPP_REQ BIT(4)
+#define PCIE_OUTB_ERR_MEM_CAUSE_ACC_DISABLED BIT(1)
+#define PCIE_OUTB_ERR_MEM_CAUSE_BAD_ADDR BIT(0)
#define PCIE_RGR1_SW_INIT_1_PERST_MASK 0x1
-#define PCIE_RGR1_SW_INIT_1_PERST_SHIFT 0x0
#define RGR1_SW_INIT_1_INIT_GENERIC_MASK 0x2
#define RGR1_SW_INIT_1_INIT_GENERIC_SHIFT 0x1
@@ -161,8 +216,9 @@
#define MDIO_PORT0 0x0
#define MDIO_DATA_MASK 0x7fffffff
#define MDIO_PORT_MASK 0xf0000
+#define MDIO_PORT_EXT_MASK 0x200000
#define MDIO_REGAD_MASK 0xffff
-#define MDIO_CMD_MASK 0xfff00000
+#define MDIO_CMD_MASK 0x00100000
#define MDIO_CMD_READ 0x1
#define MDIO_CMD_WRITE 0x0
#define MDIO_DATA_DONE_MASK 0x80000000
@@ -178,9 +234,11 @@
#define SSC_STATUS_PLL_LOCK_MASK 0x800
#define PCIE_BRCM_MAX_MEMC 3
-#define IDX_ADDR(pcie) (pcie->reg_offsets[EXT_CFG_INDEX])
-#define DATA_ADDR(pcie) (pcie->reg_offsets[EXT_CFG_DATA])
-#define PCIE_RGR1_SW_INIT_1(pcie) (pcie->reg_offsets[RGR1_SW_INIT_1])
+#define IDX_ADDR(pcie) ((pcie)->cfg->offsets[EXT_CFG_INDEX])
+#define DATA_ADDR(pcie) ((pcie)->cfg->offsets[EXT_CFG_DATA])
+#define PCIE_RGR1_SW_INIT_1(pcie) ((pcie)->cfg->offsets[RGR1_SW_INIT_1])
+#define HARD_DEBUG(pcie) ((pcie)->cfg->offsets[PCIE_HARD_DEBUG])
+#define INTR2_CPU_BASE(pcie) ((pcie)->cfg->offsets[PCIE_INTR2_CPU_BASE])
/* Rescal registers */
#define PCIE_DVT_PMU_PCIE_PHY_CTRL 0xc700
@@ -199,27 +257,45 @@ enum {
RGR1_SW_INIT_1,
EXT_CFG_INDEX,
EXT_CFG_DATA,
+ PCIE_HARD_DEBUG,
+ PCIE_INTR2_CPU_BASE,
};
-enum {
- RGR1_SW_INIT_1_INIT_MASK,
- RGR1_SW_INIT_1_INIT_SHIFT,
-};
-
-enum pcie_type {
+enum pcie_soc_base {
GENERIC,
- BCM7425,
- BCM7435,
+ BCM2711,
BCM4908,
BCM7278,
- BCM2711,
+ BCM7425,
+ BCM7435,
+ BCM7712,
+};
+
+struct inbound_win {
+ u64 size;
+ u64 pci_offset;
+ u64 cpu_addr;
};
+/*
+ * The RESCAL block is tied to PCIe controller #1, regardless of the number of
+ * controllers, and turning off PCIe controller #1 prevents access to the RESCAL
+ * register blocks, therefore no other controller can access this register
+ * space, and depending upon the bus fabric we may get a timeout (UBUS/GISB),
+ * or a hang (AXI).
+ */
+#define CFG_QUIRK_AVOID_BRIDGE_SHUTDOWN BIT(0)
+
struct pcie_cfg_data {
const int *offsets;
- const enum pcie_type type;
- void (*perst_set)(struct brcm_pcie *pcie, u32 val);
- void (*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val);
+ const enum pcie_soc_base soc_base;
+ const bool has_phy;
+ const u32 quirks;
+ u8 num_inbound_wins;
+ int (*perst_set)(struct brcm_pcie *pcie, u32 val);
+ int (*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val);
+ int (*post_setup)(struct brcm_pcie *pcie);
+ bool has_err_report;
};
struct subdev_regulators {
@@ -231,7 +307,6 @@ struct brcm_msi {
struct device *dev;
void __iomem *base;
struct device_node *np;
- struct irq_domain *msi_domain;
struct irq_domain *inner_domain;
struct mutex lock; /* guards the alloc/free operations */
u64 target_addr;
@@ -255,22 +330,43 @@ struct brcm_pcie {
int gen;
u64 msi_target_addr;
struct brcm_msi *msi;
- const int *reg_offsets;
- enum pcie_type type;
struct reset_control *rescal;
struct reset_control *perst_reset;
+ struct reset_control *bridge_reset;
+ struct reset_control *swinit_reset;
int num_memc;
u64 memc_size[PCIE_BRCM_MAX_MEMC];
u32 hw_rev;
- void (*perst_set)(struct brcm_pcie *pcie, u32 val);
- void (*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val);
struct subdev_regulators *sr;
bool ep_wakeup_capable;
+ const struct pcie_cfg_data *cfg;
+ bool bridge_in_reset;
+ struct notifier_block die_notifier;
+ struct notifier_block panic_notifier;
+ spinlock_t bridge_lock;
};
static inline bool is_bmips(const struct brcm_pcie *pcie)
{
- return pcie->type == BCM7435 || pcie->type == BCM7425;
+ return pcie->cfg->soc_base == BCM7435 || pcie->cfg->soc_base == BCM7425;
+}
+
+static int brcm_pcie_bridge_sw_init_set(struct brcm_pcie *pcie, u32 val)
+{
+ unsigned long flags;
+ int ret;
+
+ if (pcie->cfg->has_err_report)
+ spin_lock_irqsave(&pcie->bridge_lock, flags);
+
+ ret = pcie->cfg->bridge_sw_init_set(pcie, val);
+ /* If we fail, assume the bridge is in reset (off) */
+ pcie->bridge_in_reset = ret ? true : val;
+
+ if (pcie->cfg->has_err_report)
+ spin_unlock_irqrestore(&pcie->bridge_lock, flags);
+
+ return ret;
}
/*
@@ -284,8 +380,8 @@ static int brcm_pcie_encode_ibar_size(u64 size)
if (log2_in >= 12 && log2_in <= 15)
/* Covers 4KB to 32KB (inclusive) */
return (log2_in - 12) + 0x1c;
- else if (log2_in >= 16 && log2_in <= 35)
- /* Covers 64KB to 32GB, (inclusive) */
+ else if (log2_in >= 16 && log2_in <= 36)
+ /* Covers 64KB to 64GB (inclusive) */
return log2_in - 15;
/* Something is awry so disable */
return 0;
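The widened range is easy to sanity-check against the formula above; a hypothetical self-test with values derived from the code:

	/* Hypothetical self-check of the inbound window size encoding */
	static void example_check_ibar_encoding(void)
	{
		WARN_ON(brcm_pcie_encode_ibar_size(SZ_4K)      != 0x1c); /* 4 KB..32 KB band */
		WARN_ON(brcm_pcie_encode_ibar_size(SZ_64K)     != 1);    /* 64 KB */
		WARN_ON(brcm_pcie_encode_ibar_size(1ULL << 36) != 21);   /* new 64 GB maximum */
		WARN_ON(brcm_pcie_encode_ibar_size(SZ_1K)      != 0);    /* too small: disabled */
	}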
@@ -295,6 +391,7 @@ static u32 brcm_pcie_mdio_form_pkt(int port, int regad, int cmd)
{
u32 pkt = 0;
+ pkt |= FIELD_PREP(MDIO_PORT_EXT_MASK, port >> 4);
pkt |= FIELD_PREP(MDIO_PORT_MASK, port);
pkt |= FIELD_PREP(MDIO_REGAD_MASK, regad);
pkt |= FIELD_PREP(MDIO_CMD_MASK, cmd);
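With MDIO_PORT_EXT_MASK, ports above 15 become addressable: the low nibble of the port lands in bits 19:16 and bit 4 in bit 21. A hypothetical worked example:

	/* Port 0x13: EXT = 1 (bit 21) plus 0x3 in bits 19:16 of the packet */
	static u32 example_mdio_read_pkt(void)
	{
		return brcm_pcie_mdio_form_pkt(0x13, SET_ADDR_OFFSET, MDIO_CMD_READ);
	}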
@@ -330,7 +427,7 @@ static int brcm_pcie_mdio_write(void __iomem *base, u8 port,
readl(base + PCIE_RC_DL_MDIO_ADDR);
writel(MDIO_DATA_DONE_MASK | wrdata, base + PCIE_RC_DL_MDIO_WR_DATA);
- err = readw_poll_timeout_atomic(base + PCIE_RC_DL_MDIO_WR_DATA, data,
+ err = readl_poll_timeout_atomic(base + PCIE_RC_DL_MDIO_WR_DATA, data,
MDIO_WT_DONE(data), 10, 100);
return err;
}
@@ -378,17 +475,17 @@ static int brcm_pcie_set_ssc(struct brcm_pcie *pcie)
static void brcm_pcie_set_gen(struct brcm_pcie *pcie, int gen)
{
u16 lnkctl2 = readw(pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCTL2);
- u32 lnkcap = readl(pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCAP);
+ u32 lnkcap = readl(pcie->base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
- lnkcap = (lnkcap & ~PCI_EXP_LNKCAP_SLS) | gen;
- writel(lnkcap, pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCAP);
+ u32p_replace_bits(&lnkcap, gen, PCI_EXP_LNKCAP_SLS);
+ writel(lnkcap, pcie->base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
- lnkctl2 = (lnkctl2 & ~0xf) | gen;
+ u16p_replace_bits(&lnkctl2, gen, PCI_EXP_LNKCTL2_TLS);
writew(lnkctl2, pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCTL2);
}
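The open-coded mask-and-or is replaced by the <linux/bitfield.h> replace helpers, which update only the named field. A hypothetical illustration:

	/* With PCI_EXP_LNKCAP_SLS == 0xf: 0x0047dc12, gen 3 -> 0x0047dc13 */
	static u32 example_cap_set_speed(u32 lnkcap, int gen)
	{
		u32p_replace_bits(&lnkcap, gen, PCI_EXP_LNKCAP_SLS);
		return lnkcap;
	}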
static void brcm_pcie_set_outbound_win(struct brcm_pcie *pcie,
- unsigned int win, u64 cpu_addr,
+ u8 win, u64 cpu_addr,
u64 pcie_addr, u64 size)
{
u32 cpu_addr_mb_high, limit_addr_mb_high;
@@ -431,18 +528,20 @@ static void brcm_pcie_set_outbound_win(struct brcm_pcie *pcie,
writel(tmp, pcie->base + PCIE_MEM_WIN0_LIMIT_HI(win));
}
-static struct irq_chip brcm_msi_irq_chip = {
- .name = "BRCM STB PCIe MSI",
- .irq_ack = irq_chip_ack_parent,
- .irq_mask = pci_msi_mask_irq,
- .irq_unmask = pci_msi_unmask_irq,
-};
+#define BRCM_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_NO_AFFINITY)
+
+#define BRCM_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
+ MSI_FLAG_MULTI_PCI_MSI)
-static struct msi_domain_info brcm_msi_domain_info = {
- /* Multi MSI is supported by the controller, but not by this driver */
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_MULTI_PCI_MSI),
- .chip = &brcm_msi_irq_chip,
+static const struct msi_parent_ops brcm_msi_parent_ops = {
+ .required_flags = BRCM_MSI_FLAGS_REQUIRED,
+ .supported_flags = BRCM_MSI_FLAGS_SUPPORTED,
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .chip_flags = MSI_CHIP_FLAG_SET_ACK,
+ .prefix = "BRCM-",
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
};
static void brcm_pcie_msi_isr(struct irq_desc *desc)
@@ -479,12 +578,6 @@ static void brcm_msi_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
msg->data = (0xffff & PCIE_MISC_MSI_DATA_CONFIG_VAL_32) | data->hwirq;
}
-static int brcm_msi_set_affinity(struct irq_data *irq_data,
- const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
static void brcm_msi_ack_irq(struct irq_data *data)
{
struct brcm_msi *msi = irq_data_get_irq_chip_data(data);
@@ -497,7 +590,6 @@ static void brcm_msi_ack_irq(struct irq_data *data)
static struct irq_chip brcm_msi_bottom_irq_chip = {
.name = "BRCM STB MSI",
.irq_compose_msi_msg = brcm_msi_compose_msi_msg,
- .irq_set_affinity = brcm_msi_set_affinity,
.irq_ack = brcm_msi_ack_irq,
};
@@ -533,7 +625,7 @@ static int brcm_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
return hwirq;
for (i = 0; i < nr_irqs; i++)
- irq_domain_set_info(domain, virq + i, hwirq + i,
+ irq_domain_set_info(domain, virq + i, (irq_hw_number_t)hwirq + i,
&brcm_msi_bottom_irq_chip, domain->host_data,
handle_edge_irq, NULL, NULL);
return 0;
@@ -555,21 +647,18 @@ static const struct irq_domain_ops msi_domain_ops = {
static int brcm_allocate_domains(struct brcm_msi *msi)
{
- struct fwnode_handle *fwnode = of_node_to_fwnode(msi->np);
struct device *dev = msi->dev;
- msi->inner_domain = irq_domain_add_linear(NULL, msi->nr, &msi_domain_ops, msi);
- if (!msi->inner_domain) {
- dev_err(dev, "failed to create IRQ domain\n");
- return -ENOMEM;
- }
+ struct irq_domain_info info = {
+ .fwnode = of_fwnode_handle(msi->np),
+ .ops = &msi_domain_ops,
+ .host_data = msi,
+ .size = msi->nr,
+ };
- msi->msi_domain = pci_msi_create_irq_domain(fwnode,
- &brcm_msi_domain_info,
- msi->inner_domain);
- if (!msi->msi_domain) {
+ msi->inner_domain = msi_create_parent_irq_domain(&info, &brcm_msi_parent_ops);
+ if (!msi->inner_domain) {
dev_err(dev, "failed to create MSI domain\n");
- irq_domain_remove(msi->inner_domain);
return -ENOMEM;
}
@@ -578,7 +667,6 @@ static int brcm_allocate_domains(struct brcm_msi *msi)
static void brcm_free_domains(struct brcm_msi *msi)
{
- irq_domain_remove(msi->msi_domain);
irq_domain_remove(msi->inner_domain);
}
@@ -644,7 +732,7 @@ static int brcm_pcie_enable_msi(struct brcm_pcie *pcie)
BUILD_BUG_ON(BRCM_INT_PCI_MSI_LEGACY_NR > BRCM_INT_PCI_MSI_NR);
if (msi->legacy) {
- msi->intr_base = msi->base + PCIE_INTR2_CPU_BASE;
+ msi->intr_base = msi->base + INTR2_CPU_BASE(pcie);
msi->nr = BRCM_INT_PCI_MSI_LEGACY_NR;
msi->legacy_shift = 24;
} else {
@@ -700,8 +788,8 @@ static void __iomem *brcm_pcie_map_bus(struct pci_bus *bus,
/* For devices, write to the config space index register */
idx = PCIE_ECAM_OFFSET(bus->number, devfn, 0);
- writel(idx, pcie->base + PCIE_EXT_CFG_INDEX);
- return base + PCIE_EXT_CFG_DATA + PCIE_ECAM_REG(where);
+ writel(idx, base + IDX_ADDR(pcie));
+ return base + DATA_ADDR(pcie) + PCIE_ECAM_REG(where);
}
static void __iomem *brcm7425_pcie_map_bus(struct pci_bus *bus,
@@ -725,17 +813,33 @@ static void __iomem *brcm7425_pcie_map_bus(struct pci_bus *bus,
return base + DATA_ADDR(pcie);
}
-static void brcm_pcie_bridge_sw_init_set_generic(struct brcm_pcie *pcie, u32 val)
+static int brcm_pcie_bridge_sw_init_set_generic(struct brcm_pcie *pcie, u32 val)
{
- u32 tmp, mask = RGR1_SW_INIT_1_INIT_GENERIC_MASK;
+ u32 tmp, mask = RGR1_SW_INIT_1_INIT_GENERIC_MASK;
u32 shift = RGR1_SW_INIT_1_INIT_GENERIC_SHIFT;
+ int ret = 0;
+
+ if (pcie->bridge_reset) {
+ if (val)
+ ret = reset_control_assert(pcie->bridge_reset);
+ else
+ ret = reset_control_deassert(pcie->bridge_reset);
+
+ if (ret)
+ dev_err(pcie->dev, "failed to %s 'bridge' reset, err=%d\n",
+ val ? "assert" : "deassert", ret);
+
+ return ret;
+ }
tmp = readl(pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
tmp = (tmp & ~mask) | ((val << shift) & mask);
writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
+
+ return ret;
}
-static void brcm_pcie_bridge_sw_init_set_7278(struct brcm_pcie *pcie, u32 val)
+static int brcm_pcie_bridge_sw_init_set_7278(struct brcm_pcie *pcie, u32 val)
{
u32 tmp, mask = RGR1_SW_INIT_1_INIT_7278_MASK;
u32 shift = RGR1_SW_INIT_1_INIT_7278_SHIFT;
@@ -743,20 +847,29 @@ static void brcm_pcie_bridge_sw_init_set_7278(struct brcm_pcie *pcie, u32 val)
tmp = readl(pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
tmp = (tmp & ~mask) | ((val << shift) & mask);
writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
+
+ return 0;
}
-static void brcm_pcie_perst_set_4908(struct brcm_pcie *pcie, u32 val)
+static int brcm_pcie_perst_set_4908(struct brcm_pcie *pcie, u32 val)
{
+ int ret;
+
if (WARN_ONCE(!pcie->perst_reset, "missing PERST# reset controller\n"))
- return;
+ return -EINVAL;
if (val)
- reset_control_assert(pcie->perst_reset);
+ ret = reset_control_assert(pcie->perst_reset);
else
- reset_control_deassert(pcie->perst_reset);
+ ret = reset_control_deassert(pcie->perst_reset);
+
+ if (ret)
+ dev_err(pcie->dev, "failed to %s 'perst' reset, err=%d\n",
+ val ? "assert" : "deassert", ret);
+ return ret;
}
-static void brcm_pcie_perst_set_7278(struct brcm_pcie *pcie, u32 val)
+static int brcm_pcie_perst_set_7278(struct brcm_pcie *pcie, u32 val)
{
u32 tmp;
@@ -764,34 +877,110 @@ static void brcm_pcie_perst_set_7278(struct brcm_pcie *pcie, u32 val)
tmp = readl(pcie->base + PCIE_MISC_PCIE_CTRL);
u32p_replace_bits(&tmp, !val, PCIE_MISC_PCIE_CTRL_PCIE_PERSTB_MASK);
writel(tmp, pcie->base + PCIE_MISC_PCIE_CTRL);
+
+ return 0;
}
-static void brcm_pcie_perst_set_generic(struct brcm_pcie *pcie, u32 val)
+static int brcm_pcie_perst_set_generic(struct brcm_pcie *pcie, u32 val)
{
u32 tmp;
tmp = readl(pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
u32p_replace_bits(&tmp, val, PCIE_RGR1_SW_INIT_1_PERST_MASK);
writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
+
+ return 0;
}
-static int brcm_pcie_get_rc_bar2_size_and_offset(struct brcm_pcie *pcie,
- u64 *rc_bar2_size,
- u64 *rc_bar2_offset)
+static int brcm_pcie_post_setup_bcm2712(struct brcm_pcie *pcie)
+{
+ static const u16 data[] = { 0x50b9, 0xbda1, 0x0094, 0x97b4, 0x5030,
+ 0x5030, 0x0007 };
+ static const u8 regs[] = { 0x16, 0x17, 0x18, 0x19, 0x1b, 0x1c, 0x1e };
+ int ret, i;
+ u32 tmp;
+
+ /* Allow a 54MHz (xosc) refclk source */
+ ret = brcm_pcie_mdio_write(pcie->base, MDIO_PORT0, SET_ADDR_OFFSET, 0x1600);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(regs); i++) {
+ ret = brcm_pcie_mdio_write(pcie->base, MDIO_PORT0, regs[i], data[i]);
+ if (ret < 0)
+ return ret;
+ }
+
+ usleep_range(100, 200);
+
+ /*
+ * Set L1SS sub-state timers to avoid lengthy state transitions,
+ * PM clock period is 18.52ns (1/54MHz, round down).
+ */
+ tmp = readl(pcie->base + PCIE_RC_PL_PHY_CTL_15);
+ tmp &= ~PCIE_RC_PL_PHY_CTL_15_PM_CLK_PERIOD_MASK;
+ tmp |= 0x12;
+ writel(tmp, pcie->base + PCIE_RC_PL_PHY_CTL_15);
+
+ return 0;
+}
+
+static void add_inbound_win(struct inbound_win *b, u8 *count, u64 size,
+ u64 cpu_addr, u64 pci_offset)
+{
+ b->size = size;
+ b->cpu_addr = cpu_addr;
+ b->pci_offset = pci_offset;
+ (*count)++;
+}
+
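A hypothetical illustration of the builder: BAR1 parked with size 0, one 4 GB window for BAR2, and the counter tracking how many entries were filled:

	/* Sketch only: fill a two-entry inbound window list (indices are BAR numbers) */
	static u8 example_fill_inbound_wins(struct inbound_win *wins)
	{
		u8 n = 0;

		add_inbound_win(&wins[1], &n, 0, 0, 0);		/* BAR1: disabled */
		add_inbound_win(&wins[2], &n, SZ_4G, 0, SZ_4G);	/* BAR2: DRAM view */

		return n;	/* == 2 */
	}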
+static int brcm_pcie_get_inbound_wins(struct brcm_pcie *pcie,
+ struct inbound_win inbound_wins[])
{
struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
+ u64 pci_offset, cpu_addr, size = 0, tot_size = 0;
struct resource_entry *entry;
struct device *dev = pcie->dev;
u64 lowest_pcie_addr = ~(u64)0;
int ret, i = 0;
- u64 size = 0;
+ u8 n = 0;
+
+ /*
+ * The HW registers (and PCIe) use order-1 numbering for BARs. As such,
+ * we have inbound_wins[0] unused and BAR1 starts at inbound_wins[1].
+ */
+ struct inbound_win *b_begin = &inbound_wins[1];
+ struct inbound_win *b = b_begin;
+
+ /*
+ * STB chips besides the 7712 disable the first inbound window by
+ * default. Rather than being mapped to system memory, it is mapped to the
+ * internal registers of the SoC. This feature is deprecated, has
+ * security considerations, and is not implemented in our modern
+ * SoCs.
+ */
+ if (pcie->cfg->soc_base != BCM7712)
+ add_inbound_win(b++, &n, 0, 0, 0);
resource_list_for_each_entry(entry, &bridge->dma_ranges) {
- u64 pcie_beg = entry->res->start - entry->offset;
+ u64 pcie_start = entry->res->start - entry->offset;
+ u64 cpu_start = entry->res->start;
+
+ size = resource_size(entry->res);
+ tot_size += size;
+ if (pcie_start < lowest_pcie_addr)
+ lowest_pcie_addr = pcie_start;
+ /*
+ * 7712 and newer chips may have many BARs, with each
+ * offering a non-overlapping viewport to system memory.
+ * That being said, each BAR's size must still be a power of
+ * two.
+ */
+ if (pcie->cfg->soc_base == BCM7712)
+ add_inbound_win(b++, &n, size, cpu_start, pcie_start);
- size += entry->res->end - entry->res->start + 1;
- if (pcie_beg < lowest_pcie_addr)
- lowest_pcie_addr = pcie_beg;
+ if (n > pcie->cfg->num_inbound_wins)
+ break;
}
if (lowest_pcie_addr == ~(u64)0) {
@@ -799,13 +988,20 @@ static int brcm_pcie_get_rc_bar2_size_and_offset(struct brcm_pcie *pcie,
return -EINVAL;
}
+ /*
+ * 7712 and newer chips do not have an internal memory mapping system
+	 * that enables multiple memory controllers. As such, we can return
+	 * now without doing any special configuration.
+ */
+ if (pcie->cfg->soc_base == BCM7712)
+ return n;
+
ret = of_property_read_variable_u64_array(pcie->np, "brcm,scb-sizes", pcie->memc_size, 1,
PCIE_BRCM_MAX_MEMC);
-
if (ret <= 0) {
/* Make an educated guess */
pcie->num_memc = 1;
- pcie->memc_size[0] = 1ULL << fls64(size - 1);
+ pcie->memc_size[0] = 1ULL << fls64(tot_size - 1);
} else {
pcie->num_memc = ret;
}
@@ -814,10 +1010,15 @@ static int brcm_pcie_get_rc_bar2_size_and_offset(struct brcm_pcie *pcie,
for (i = 0, size = 0; i < pcie->num_memc; i++)
size += pcie->memc_size[i];
- /* System memory starts at this address in PCIe-space */
- *rc_bar2_offset = lowest_pcie_addr;
- /* The sum of all memc views must also be a power of 2 */
- *rc_bar2_size = 1ULL << fls64(size - 1);
+ /* Our HW mandates that the window size must be a power of 2 */
+ size = 1ULL << fls64(size - 1);
+
+ /*
+ * For STB chips, the BAR2 cpu_addr is hardwired to the start
+ * of system memory, so we set it to 0.
+ */
+ cpu_addr = 0;
+ pci_offset = lowest_pcie_addr;
/*
* We validate the inbound memory view even though we should trust
@@ -831,7 +1032,7 @@ static int brcm_pcie_get_rc_bar2_size_and_offset(struct brcm_pcie *pcie,
*
* The PCIe host controller by design must set the inbound viewport to
* be a contiguous arrangement of all of the system's memory. In
- * addition, its size mut be a power of two. To further complicate
+ * addition, its size must be a power of two. To further complicate
* matters, the viewport must start on a pcie-address that is aligned
* on a multiple of its size. If a portion of the viewport does not
* represent system memory -- e.g. 3GB of memory requires a 4GB
@@ -852,39 +1053,119 @@ static int brcm_pcie_get_rc_bar2_size_and_offset(struct brcm_pcie *pcie,
* outbound memory @ 3GB). So instead it will start at the 1x
* multiple of its size
*/
- if (!*rc_bar2_size || (*rc_bar2_offset & (*rc_bar2_size - 1)) ||
- (*rc_bar2_offset < SZ_4G && *rc_bar2_offset > SZ_2G)) {
- dev_err(dev, "Invalid rc_bar2_offset/size: size 0x%llx, off 0x%llx\n",
- *rc_bar2_size, *rc_bar2_offset);
+ if (!size || (pci_offset & (size - 1)) ||
+ (pci_offset < SZ_4G && pci_offset > SZ_2G)) {
+ dev_err(dev, "Invalid inbound_win2_offset/size: size 0x%llx, off 0x%llx\n",
+ size, pci_offset);
return -EINVAL;
}
- return 0;
+ /* Enable inbound window 2, the main inbound window for STB chips */
+ add_inbound_win(b++, &n, size, cpu_addr, pci_offset);
+
+ /*
+	 * Disable inbound window 3. On some chips it presents the same
+	 * window as #2, but the data appears in a settable endianness.
+ */
+ add_inbound_win(b++, &n, 0, 0, 0);
+
+ return n;
+}
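The 1ULL << fls64(size - 1) idiom used above rounds a size up to the next power of two, which both the per-BAR windows and the aggregate memc window require. A standalone sketch under that reading, with a hypothetical helper name:

    /* Sketch: round a window size up to the next power of two;
     * 3GB in -> 4GB out, and an exact power of two is unchanged.
     */
    static u64 brcm_round_up_pow2(u64 size)
    {
            return 1ULL << fls64(size - 1);
    }

The pci_offset & (size - 1) test in the same function then verifies that the window start is naturally aligned to the rounded size.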
+
+static u32 brcm_bar_reg_offset(int bar)
+{
+ if (bar <= 3)
+ return PCIE_MISC_RC_BAR1_CONFIG_LO + 8 * (bar - 1);
+ else
+ return PCIE_MISC_RC_BAR4_CONFIG_LO + 8 * (bar - 4);
+}
+
+static u32 brcm_ubus_reg_offset(int bar)
+{
+ if (bar <= 3)
+ return PCIE_MISC_UBUS_BAR1_CONFIG_REMAP + 8 * (bar - 1);
+ else
+ return PCIE_MISC_UBUS_BAR4_CONFIG_REMAP + 8 * (bar - 4);
+}
+
+static void set_inbound_win_registers(struct brcm_pcie *pcie,
+ const struct inbound_win *inbound_wins,
+ u8 num_inbound_wins)
+{
+ void __iomem *base = pcie->base;
+ int i;
+
+ for (i = 1; i <= num_inbound_wins; i++) {
+ u64 pci_offset = inbound_wins[i].pci_offset;
+ u64 cpu_addr = inbound_wins[i].cpu_addr;
+ u64 size = inbound_wins[i].size;
+ u32 reg_offset = brcm_bar_reg_offset(i);
+ u32 tmp = lower_32_bits(pci_offset);
+
+ u32p_replace_bits(&tmp, brcm_pcie_encode_ibar_size(size),
+ PCIE_MISC_RC_BAR1_CONFIG_LO_SIZE_MASK);
+
+ /* Write low */
+ writel_relaxed(tmp, base + reg_offset);
+ /* Write high */
+ writel_relaxed(upper_32_bits(pci_offset), base + reg_offset + 4);
+
+ /*
+		 * Most STB chips:
+		 *     Nothing more to do here.
+		 * 7712:
+		 *     All BARs additionally need their UBUS remap registers set.
+ */
+ if (pcie->cfg->soc_base == BCM7712) {
+ /* BUS remap register settings */
+ reg_offset = brcm_ubus_reg_offset(i);
+ tmp = lower_32_bits(cpu_addr) & ~0xfff;
+ tmp |= PCIE_MISC_UBUS_BAR1_CONFIG_REMAP_ACCESS_EN_MASK;
+ writel_relaxed(tmp, base + reg_offset);
+ tmp = upper_32_bits(cpu_addr);
+ writel_relaxed(tmp, base + reg_offset + 4);
+ }
+ }
}
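On 7712 the UBUS remap LO word combines the 4K-aligned low half of the CPU address with the access-enable bit. A sketch of that composition, reusing the mask macro from the code above:

    /* Sketch: UBUS remap LO word = 4K-aligned low address bits
     * plus the access-enable flag, as in the 7712 branch above.
     */
    static u32 brcm_ubus_remap_lo(u64 cpu_addr)
    {
            return (lower_32_bits(cpu_addr) & ~0xfffU) |
                   PCIE_MISC_UBUS_BAR1_CONFIG_REMAP_ACCESS_EN_MASK;
    }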
static int brcm_pcie_setup(struct brcm_pcie *pcie)
{
- u64 rc_bar2_offset, rc_bar2_size;
+ struct inbound_win inbound_wins[PCIE_BRCM_MAX_INBOUND_WINS];
void __iomem *base = pcie->base;
struct pci_host_bridge *bridge;
struct resource_entry *entry;
- u32 tmp, burst, aspm_support;
- int num_out_wins = 0;
- int ret, memc;
+ u32 tmp, burst, num_lanes, num_lanes_cap;
+ u8 num_out_wins = 0;
+ int num_inbound_wins = 0;
+ int memc, ret;
/* Reset the bridge */
- pcie->bridge_sw_init_set(pcie, 1);
+ ret = brcm_pcie_bridge_sw_init_set(pcie, 1);
+ if (ret)
+ return ret;
+
+ /* Ensure that PERST# is asserted; some bootloaders may deassert it. */
+ if (pcie->cfg->soc_base == BCM2711) {
+ ret = pcie->cfg->perst_set(pcie, 1);
+ if (ret) {
+ pcie->cfg->bridge_sw_init_set(pcie, 0);
+ return ret;
+ }
+ }
+
usleep_range(100, 200);
/* Take the bridge out of reset */
- pcie->bridge_sw_init_set(pcie, 0);
+ ret = brcm_pcie_bridge_sw_init_set(pcie, 0);
+ if (ret)
+ return ret;
- tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
+ tmp = readl(base + HARD_DEBUG(pcie));
if (is_bmips(pcie))
tmp &= ~PCIE_BMIPS_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK;
else
tmp &= ~PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK;
- writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
+ writel(tmp, base + HARD_DEBUG(pcie));
/* Wait for SerDes to be stable */
usleep_range(100, 200);
@@ -895,9 +1176,9 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
*/
if (is_bmips(pcie))
burst = 0x1; /* 256 bytes */
- else if (pcie->type == BCM2711)
+ else if (pcie->cfg->soc_base == BCM2711)
burst = 0x0; /* 128 bytes */
- else if (pcie->type == BCM7278)
+ else if (pcie->cfg->soc_base == BCM7278)
burst = 0x3; /* 512 bytes */
else
burst = 0x2; /* 512 bytes */
@@ -914,17 +1195,16 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
u32p_replace_bits(&tmp, 1, PCIE_MISC_MISC_CTRL_PCIE_RCB_64B_MODE_MASK);
writel(tmp, base + PCIE_MISC_MISC_CTRL);
- ret = brcm_pcie_get_rc_bar2_size_and_offset(pcie, &rc_bar2_size,
- &rc_bar2_offset);
- if (ret)
- return ret;
+ num_inbound_wins = brcm_pcie_get_inbound_wins(pcie, inbound_wins);
+ if (num_inbound_wins < 0)
+ return num_inbound_wins;
- tmp = lower_32_bits(rc_bar2_offset);
- u32p_replace_bits(&tmp, brcm_pcie_encode_ibar_size(rc_bar2_size),
- PCIE_MISC_RC_BAR2_CONFIG_LO_SIZE_MASK);
- writel(tmp, base + PCIE_MISC_RC_BAR2_CONFIG_LO);
- writel(upper_32_bits(rc_bar2_offset),
- base + PCIE_MISC_RC_BAR2_CONFIG_HI);
+ set_inbound_win_registers(pcie, inbound_wins, num_inbound_wins);
+
+ if (!brcm_pcie_rc_mode(pcie)) {
+ dev_err(pcie->dev, "PCIe RC controller misconfigured as Endpoint\n");
+ return -EINVAL;
+ }
tmp = readl(base + PCIE_MISC_MISC_CTRL);
for (memc = 0; memc < pcie->num_memc; memc++) {
@@ -946,35 +1226,40 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
* 4GB or when the inbound area is smaller than 4GB (taking into
* account the rounding-up we're forced to perform).
*/
- if (rc_bar2_offset >= SZ_4G || (rc_bar2_size + rc_bar2_offset) < SZ_4G)
+ if (inbound_wins[2].pci_offset >= SZ_4G ||
+ (inbound_wins[2].size + inbound_wins[2].pci_offset) < SZ_4G)
pcie->msi_target_addr = BRCM_MSI_TARGET_ADDR_LT_4GB;
else
pcie->msi_target_addr = BRCM_MSI_TARGET_ADDR_GT_4GB;
- if (!brcm_pcie_rc_mode(pcie)) {
- dev_err(pcie->dev, "PCIe RC controller misconfigured as Endpoint\n");
- return -EINVAL;
- }
-
- /* disable the PCIe->GISB memory window (RC_BAR1) */
- tmp = readl(base + PCIE_MISC_RC_BAR1_CONFIG_LO);
- tmp &= ~PCIE_MISC_RC_BAR1_CONFIG_LO_SIZE_MASK;
- writel(tmp, base + PCIE_MISC_RC_BAR1_CONFIG_LO);
-
- /* disable the PCIe->SCB memory window (RC_BAR3) */
- tmp = readl(base + PCIE_MISC_RC_BAR3_CONFIG_LO);
- tmp &= ~PCIE_MISC_RC_BAR3_CONFIG_LO_SIZE_MASK;
- writel(tmp, base + PCIE_MISC_RC_BAR3_CONFIG_LO);
/* Don't advertise L0s capability if 'aspm-no-l0s' */
- aspm_support = PCIE_LINK_STATE_L1;
- if (!of_property_read_bool(pcie->np, "aspm-no-l0s"))
- aspm_support |= PCIE_LINK_STATE_L0S;
tmp = readl(base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
- u32p_replace_bits(&tmp, aspm_support,
- PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_ASPM_SUPPORT_MASK);
+ if (of_property_read_bool(pcie->np, "aspm-no-l0s"))
+ tmp &= ~PCI_EXP_LNKCAP_ASPM_L0S;
writel(tmp, base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
+ /* 'tmp' still holds the contents of PRIV1_LINK_CAPABILITY */
+ num_lanes_cap = u32_get_bits(tmp, PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_MAX_LINK_WIDTH_MASK);
+ num_lanes = 0;
+
+ /*
+ * Use hardware negotiated Max Link Width value by default. If the
+ * "num-lanes" DT property is present, assume that the chip's default
+ * link width capability information is incorrect/undesired and use the
+ * specified value instead.
+ */
+ if (!of_property_read_u32(pcie->np, "num-lanes", &num_lanes) &&
+ num_lanes && num_lanes <= 4 && num_lanes_cap != num_lanes) {
+ u32p_replace_bits(&tmp, num_lanes,
+ PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_MAX_LINK_WIDTH_MASK);
+ writel(tmp, base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
+ tmp = readl(base + PCIE_RC_PL_REG_PHY_CTL_1);
+ u32p_replace_bits(&tmp, 1,
+ PCIE_RC_PL_REG_PHY_CTL_1_REG_P2_POWERDOWN_ENA_NOSYNC_MASK);
+ writel(tmp, base + PCIE_RC_PL_REG_PHY_CTL_1);
+ }
+
/*
* For config space accesses on the RC, show the right class for
* a PCIe-PCIe bridge (the default setting is to be EP mode).
@@ -1015,32 +1300,120 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
num_out_wins++;
}
- /* PCIe->SCB endian mode for BAR */
+ /* PCIe->SCB endian mode for inbound window */
tmp = readl(base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1);
- u32p_replace_bits(&tmp, PCIE_RC_CFG_VENDOR_SPCIFIC_REG1_LITTLE_ENDIAN,
+ u32p_replace_bits(&tmp, PCIE_RC_CFG_VENDOR_SPECIFIC_REG1_LITTLE_ENDIAN,
PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1_ENDIAN_MODE_BAR2_MASK);
writel(tmp, base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1);
+ if (pcie->cfg->post_setup) {
+ ret = pcie->cfg->post_setup(pcie);
+ if (ret < 0)
+ return ret;
+ }
+
return 0;
}
+/*
+ * This extends the timeout period for an access to an internal bus. This
+ * access timeout may occur during L1SS sleep periods, even without the
+ * presence of a PCIe access.
+ */
+static void brcm_extend_rbus_timeout(struct brcm_pcie *pcie)
+{
+ /* TIMEOUT register is two registers before RGR1_SW_INIT_1 */
+ const unsigned int REG_OFFSET = PCIE_RGR1_SW_INIT_1(pcie) - 8;
+ u32 timeout_us = 4000000; /* 4 seconds, our setting for L1SS */
+
+ /* 7712 does not have this (RGR1) timer */
+ if (pcie->cfg->soc_base == BCM7712)
+ return;
+
+ /* Each unit in timeout register is 1/216,000,000 seconds */
+ writel(216 * timeout_us, pcie->base + REG_OFFSET);
+}
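The conversion above hinges on the timeout counter ticking at 216MHz, so the register wants microseconds multiplied by 216. A sketch of the math; note that the 4-second setting still fits comfortably in a u32:

    /* Sketch: microseconds -> RBUS timeout ticks (1 tick = 1/216MHz).
     * 4,000,000us * 216 = 864,000,000 ticks, well under U32_MAX.
     */
    static u32 brcm_rbus_timeout_ticks(u32 timeout_us)
    {
            return 216U * timeout_us;
    }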
+
+static void brcm_config_clkreq(struct brcm_pcie *pcie)
+{
+ static const char err_msg[] = "invalid 'brcm,clkreq-mode' DT string\n";
+ const char *mode = "default";
+ u32 clkreq_cntl;
+ int ret, tmp;
+
+ ret = of_property_read_string(pcie->np, "brcm,clkreq-mode", &mode);
+ if (ret && ret != -EINVAL) {
+ dev_err(pcie->dev, err_msg);
+ mode = "safe";
+ }
+
+ /* Start out assuming safe mode (both mode bits cleared) */
+ clkreq_cntl = readl(pcie->base + HARD_DEBUG(pcie));
+ clkreq_cntl &= ~PCIE_CLKREQ_MASK;
+
+ if (strcmp(mode, "no-l1ss") == 0) {
+ /*
+ * "no-l1ss" -- Provides Clock Power Management, L0s, and
+ * L1, but cannot provide L1 substate (L1SS) power
+ * savings. If the downstream device connected to the RC is
+ * L1SS capable AND the OS enables L1SS, all PCIe traffic
+ * may abruptly halt, potentially hanging the system.
+ */
+ clkreq_cntl |= PCIE_MISC_HARD_PCIE_HARD_DEBUG_CLKREQ_DEBUG_ENABLE_MASK;
+ /*
+ * We want to un-advertise L1 substates because if the OS
+ * tries to configure the controller into using L1 substate
+ * power savings it may fail or hang when the RC HW is in
+ * "no-l1ss" mode.
+ */
+ tmp = readl(pcie->base + PCIE_RC_CFG_PRIV1_ROOT_CAP);
+ u32p_replace_bits(&tmp, 2, PCIE_RC_CFG_PRIV1_ROOT_CAP_L1SS_MODE_MASK);
+ writel(tmp, pcie->base + PCIE_RC_CFG_PRIV1_ROOT_CAP);
+
+ } else if (strcmp(mode, "default") == 0) {
+ /*
+ * "default" -- Provides L0s, L1, and L1SS, but not
+ * compliant to provide Clock Power Management;
+ * specifically, may not be able to meet the Tclron max
+ * timing of 400ns as specified in "Dynamic Clock Control",
+ * section 3.2.5.2.2 of the PCIe spec. This situation is
+ * atypical and should happen only with older devices.
+ */
+ clkreq_cntl |= PCIE_MISC_HARD_PCIE_HARD_DEBUG_L1SS_ENABLE_MASK;
+ brcm_extend_rbus_timeout(pcie);
+
+ } else {
+ /*
+ * "safe" -- No power savings; refclk is driven by RC
+ * unconditionally.
+ */
+ if (strcmp(mode, "safe") != 0)
+ dev_err(pcie->dev, err_msg);
+ mode = "safe";
+ }
+ writel(clkreq_cntl, pcie->base + HARD_DEBUG(pcie));
+
+ dev_info(pcie->dev, "clkreq-mode set to %s\n", mode);
+}
+
static int brcm_pcie_start_link(struct brcm_pcie *pcie)
{
struct device *dev = pcie->dev;
void __iomem *base = pcie->base;
u16 nlw, cls, lnksta;
bool ssc_good = false;
- u32 tmp;
int ret, i;
+ /* Limit the generation if specified */
+ if (pcie->gen)
+ brcm_pcie_set_gen(pcie, pcie->gen);
+
/* Unassert the fundamental reset */
- pcie->perst_set(pcie, 0);
+ ret = pcie->cfg->perst_set(pcie, 0);
+ if (ret)
+ return ret;
- /*
- * Wait for 100ms after PERST# deassertion; see PCIe CEM specification
- * sections 2.2, PCIe r5.0, 6.6.1.
- */
- msleep(100);
+ msleep(PCIE_RESET_CONFIG_WAIT_MS);
/*
* Give the RC/EP even more time to wake up, before trying to
@@ -1055,8 +1428,7 @@ static int brcm_pcie_start_link(struct brcm_pcie *pcie)
return -ENODEV;
}
- if (pcie->gen)
- brcm_pcie_set_gen(pcie, pcie->gen);
+ brcm_config_clkreq(pcie);
if (pcie->ssc) {
ret = brcm_pcie_set_ssc(pcie);
@@ -1073,14 +1445,6 @@ static int brcm_pcie_start_link(struct brcm_pcie *pcie)
pci_speed_string(pcie_link_speed[cls]), nlw,
ssc_good ? "(SSC)" : "(!SSC)");
- /*
- * Refclk from RC should be gated with CLKREQ# input when ASPM L0s,L1
- * is enabled => setting the CLKREQ_DEBUG_ENABLE field to 1.
- */
- tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
- tmp |= PCIE_MISC_HARD_PCIE_HARD_DEBUG_CLKREQ_DEBUG_ENABLE_MASK;
- writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
-
return 0;
}
@@ -1128,7 +1492,8 @@ static int brcm_pcie_add_bus(struct pci_bus *bus)
ret = regulator_bulk_get(dev, sr->num_supplies, sr->supplies);
if (ret) {
- dev_info(dev, "No regulators for downstream device\n");
+ dev_info(dev, "Did not get regulators, err=%d\n", ret);
+ pcie->sr = NULL;
goto no_regulators;
}
@@ -1151,7 +1516,7 @@ static void brcm_pcie_remove_bus(struct pci_bus *bus)
struct subdev_regulators *sr = pcie->sr;
struct device *dev = &bus->dev;
- if (!sr)
+ if (!sr || !bus->parent || !pci_is_root_bus(bus->parent))
return;
if (regulator_bulk_disable(sr->num_supplies, sr->supplies))
@@ -1224,23 +1589,25 @@ static int brcm_phy_cntl(struct brcm_pcie *pcie, const int start)
static inline int brcm_phy_start(struct brcm_pcie *pcie)
{
- return pcie->rescal ? brcm_phy_cntl(pcie, 1) : 0;
+ return pcie->cfg->has_phy ? brcm_phy_cntl(pcie, 1) : 0;
}
static inline int brcm_phy_stop(struct brcm_pcie *pcie)
{
- return pcie->rescal ? brcm_phy_cntl(pcie, 0) : 0;
+ return pcie->cfg->has_phy ? brcm_phy_cntl(pcie, 0) : 0;
}
-static void brcm_pcie_turn_off(struct brcm_pcie *pcie)
+static int brcm_pcie_turn_off(struct brcm_pcie *pcie)
{
void __iomem *base = pcie->base;
- int tmp;
+ int tmp, ret;
if (brcm_pcie_link_up(pcie))
brcm_pcie_enter_l23(pcie);
/* Assert fundamental reset */
- pcie->perst_set(pcie, 1);
+ ret = pcie->cfg->perst_set(pcie, 1);
+ if (ret)
+ return ret;
/* Deassert request for L23 in case it was asserted */
tmp = readl(base + PCIE_MISC_PCIE_CTRL);
@@ -1248,12 +1615,15 @@ static void brcm_pcie_turn_off(struct brcm_pcie *pcie)
writel(tmp, base + PCIE_MISC_PCIE_CTRL);
/* Turn off SerDes */
- tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
+ tmp = readl(base + HARD_DEBUG(pcie));
u32p_replace_bits(&tmp, 1, PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK);
- writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
+ writel(tmp, base + HARD_DEBUG(pcie));
- /* Shutdown PCIe bridge */
- pcie->bridge_sw_init_set(pcie, 1);
+ if (!(pcie->cfg->quirks & CFG_QUIRK_AVOID_BRIDGE_SHUTDOWN))
+ /* Shutdown PCIe bridge */
+ ret = brcm_pcie_bridge_sw_init_set(pcie, 1);
+
+ return ret;
}
static int pci_dev_may_wakeup(struct pci_dev *dev, void *data)
@@ -1271,9 +1641,12 @@ static int brcm_pcie_suspend_noirq(struct device *dev)
{
struct brcm_pcie *pcie = dev_get_drvdata(dev);
struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
- int ret;
+ int ret, rret;
+
+ ret = brcm_pcie_turn_off(pcie);
+ if (ret)
+ return ret;
- brcm_pcie_turn_off(pcie);
/*
* If brcm_phy_stop() returns an error, just dev_err(). If we
* return the error it will cause the suspend to fail and this is a
@@ -1302,7 +1675,10 @@ static int brcm_pcie_suspend_noirq(struct device *dev)
pcie->sr->supplies);
if (ret) {
dev_err(dev, "Could not turn off regulators\n");
- reset_control_reset(pcie->rescal);
+ rret = reset_control_reset(pcie->rescal);
+ if (rret)
+ dev_err(dev, "failed to reset 'rascal' controller ret=%d\n",
+ rret);
return ret;
}
}
@@ -1317,7 +1693,7 @@ static int brcm_pcie_resume_noirq(struct device *dev)
struct brcm_pcie *pcie = dev_get_drvdata(dev);
void __iomem *base;
u32 tmp;
- int ret;
+ int ret, rret;
base = pcie->base;
ret = clk_prepare_enable(pcie->clk);
@@ -1333,12 +1709,14 @@ static int brcm_pcie_resume_noirq(struct device *dev)
goto err_reset;
/* Take bridge out of reset so we can access the SERDES reg */
- pcie->bridge_sw_init_set(pcie, 0);
+ ret = brcm_pcie_bridge_sw_init_set(pcie, 0);
+ if (ret)
+ goto err_reset;
/* SERDES_IDDQ = 0 */
- tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
+ tmp = readl(base + HARD_DEBUG(pcie));
u32p_replace_bits(&tmp, 0, PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK);
- writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
+ writel(tmp, base + HARD_DEBUG(pcie));
/* wait for serdes to be stable */
udelay(100);
@@ -1379,12 +1757,127 @@ err_regulator:
if (pcie->sr)
regulator_bulk_disable(pcie->sr->num_supplies, pcie->sr->supplies);
err_reset:
- reset_control_rearm(pcie->rescal);
+ rret = reset_control_rearm(pcie->rescal);
+ if (rret)
+ dev_err(pcie->dev, "failed to rearm 'rescal' reset, err=%d\n", rret);
err_disable_clk:
clk_disable_unprepare(pcie->clk);
return ret;
}
+/* Dump out PCIe errors on die or panic */
+static int brcm_pcie_dump_err(struct brcm_pcie *pcie,
+ const char *type)
+{
+ void __iomem *base = pcie->base;
+ int i, is_cfg_err, is_mem_err, lanes;
+ const char *width_str, *direction_str;
+ u32 info, cfg_addr, cfg_cause, mem_cause, lo, hi;
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
+ unsigned long flags;
+ char lanes_str[9];
+
+ spin_lock_irqsave(&pcie->bridge_lock, flags);
+ /* Don't access registers when the bridge is off */
+ if (pcie->bridge_in_reset || readl(base + PCIE_OUTB_ERR_VALID) == 0) {
+ spin_unlock_irqrestore(&pcie->bridge_lock, flags);
+ return NOTIFY_DONE;
+ }
+
+ /* Read all necessary registers so we can release the spinlock ASAP */
+ info = readl(base + PCIE_OUTB_ERR_ACC_INFO);
+ is_cfg_err = !!(info & PCIE_OUTB_ERR_ACC_INFO_CFG_ERR);
+ is_mem_err = !!(info & PCIE_OUTB_ERR_ACC_INFO_MEM_ERR);
+ if (is_cfg_err) {
+ cfg_addr = readl(base + PCIE_OUTB_ERR_ACC_ADDR);
+ cfg_cause = readl(base + PCIE_OUTB_ERR_CFG_CAUSE);
+ }
+ if (is_mem_err) {
+ mem_cause = readl(base + PCIE_OUTB_ERR_MEM_CAUSE);
+ lo = readl(base + PCIE_OUTB_ERR_MEM_ADDR_LO);
+ hi = readl(base + PCIE_OUTB_ERR_MEM_ADDR_HI);
+ }
+ /* We've got all of the info, clear the error */
+ writel(1, base + PCIE_OUTB_ERR_CLEAR);
+ spin_unlock_irqrestore(&pcie->bridge_lock, flags);
+
+ dev_err(pcie->dev, "reporting PCIe info which may be related to %s error\n",
+ type);
+ width_str = (info & PCIE_OUTB_ERR_ACC_INFO_TYPE_64) ? "64bit" : "32bit";
+ direction_str = str_read_write(!(info & PCIE_OUTB_ERR_ACC_INFO_DIR_WRITE));
+ lanes = FIELD_GET(PCIE_OUTB_ERR_ACC_INFO_BYTE_LANES, info);
+ for (i = 0, lanes_str[8] = 0; i < 8; i++)
+ lanes_str[i] = (lanes & (1 << i)) ? '1' : '0';
+
+ if (is_cfg_err) {
+ int bus = FIELD_GET(PCIE_OUTB_ERR_ACC_ADDR_BUS, cfg_addr);
+ int dev = FIELD_GET(PCIE_OUTB_ERR_ACC_ADDR_DEV, cfg_addr);
+ int func = FIELD_GET(PCIE_OUTB_ERR_ACC_ADDR_FUNC, cfg_addr);
+ int reg = FIELD_GET(PCIE_OUTB_ERR_ACC_ADDR_REG, cfg_addr);
+
+ dev_err(pcie->dev, "Error: CFG Acc, %s, %s (%04x:%02x:%02x.%d) reg=0x%x, lanes=%s\n",
+ width_str, direction_str, bridge->domain_nr, bus, dev,
+ func, reg, lanes_str);
+ dev_err(pcie->dev, " Type: TO=%d Abt=%d UnsupReq=%d AccTO=%d AccDsbld=%d Acc64bit=%d\n",
+ !!(cfg_cause & PCIE_OUTB_ERR_CFG_CAUSE_TIMEOUT),
+ !!(cfg_cause & PCIE_OUTB_ERR_CFG_CAUSE_ABORT),
+ !!(cfg_cause & PCIE_OUTB_ERR_CFG_CAUSE_UNSUPP_REQ),
+ !!(cfg_cause & PCIE_OUTB_ERR_CFG_CAUSE_ACC_TIMEOUT),
+ !!(cfg_cause & PCIE_OUTB_ERR_CFG_CAUSE_ACC_DISABLED),
+ !!(cfg_cause & PCIE_OUTB_ERR_CFG_CAUSE_ACC_64BIT));
+ }
+
+ if (is_mem_err) {
+ u64 addr = ((u64)hi << 32) | (u64)lo;
+
+ dev_err(pcie->dev, "Error: Mem Acc, %s, %s, @0x%llx, lanes=%s\n",
+ width_str, direction_str, addr, lanes_str);
+ dev_err(pcie->dev, " Type: TO=%d Abt=%d UnsupReq=%d AccDsble=%d BadAddr=%d\n",
+ !!(mem_cause & PCIE_OUTB_ERR_MEM_CAUSE_TIMEOUT),
+ !!(mem_cause & PCIE_OUTB_ERR_MEM_CAUSE_ABORT),
+ !!(mem_cause & PCIE_OUTB_ERR_MEM_CAUSE_UNSUPP_REQ),
+ !!(mem_cause & PCIE_OUTB_ERR_MEM_CAUSE_ACC_DISABLED),
+ !!(mem_cause & PCIE_OUTB_ERR_MEM_CAUSE_BAD_ADDR));
+ }
+
+ return NOTIFY_DONE;
+}
+
+static int brcm_pcie_die_notify_cb(struct notifier_block *self,
+ unsigned long v, void *p)
+{
+ struct brcm_pcie *pcie =
+ container_of(self, struct brcm_pcie, die_notifier);
+
+ return brcm_pcie_dump_err(pcie, "Die");
+}
+
+static int brcm_pcie_panic_notify_cb(struct notifier_block *self,
+ unsigned long v, void *p)
+{
+ struct brcm_pcie *pcie =
+ container_of(self, struct brcm_pcie, panic_notifier);
+
+ return brcm_pcie_dump_err(pcie, "Panic");
+}
+
+static void brcm_register_die_notifiers(struct brcm_pcie *pcie)
+{
+ pcie->panic_notifier.notifier_call = brcm_pcie_panic_notify_cb;
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &pcie->panic_notifier);
+
+ pcie->die_notifier.notifier_call = brcm_pcie_die_notify_cb;
+ register_die_notifier(&pcie->die_notifier);
+}
+
+static void brcm_unregister_die_notifiers(struct brcm_pcie *pcie)
+{
+ unregister_die_notifier(&pcie->die_notifier);
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &pcie->panic_notifier);
+}
+
static void __brcm_pcie_remove(struct brcm_pcie *pcie)
{
brcm_msi_remove(pcie);
@@ -1396,87 +1889,138 @@ static void __brcm_pcie_remove(struct brcm_pcie *pcie)
clk_disable_unprepare(pcie->clk);
}
-static int brcm_pcie_remove(struct platform_device *pdev)
+static void brcm_pcie_remove(struct platform_device *pdev)
{
struct brcm_pcie *pcie = platform_get_drvdata(pdev);
struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
pci_stop_root_bus(bridge->bus);
pci_remove_root_bus(bridge->bus);
- __brcm_pcie_remove(pcie);
+ if (pcie->cfg->has_err_report)
+ brcm_unregister_die_notifiers(pcie);
- return 0;
+ __brcm_pcie_remove(pcie);
}
static const int pcie_offsets[] = {
- [RGR1_SW_INIT_1] = 0x9210,
- [EXT_CFG_INDEX] = 0x9000,
- [EXT_CFG_DATA] = 0x9004,
+ [RGR1_SW_INIT_1] = 0x9210,
+ [EXT_CFG_INDEX] = 0x9000,
+ [EXT_CFG_DATA] = 0x8000,
+ [PCIE_HARD_DEBUG] = 0x4204,
+ [PCIE_INTR2_CPU_BASE] = 0x4300,
+};
+
+static const int pcie_offsets_bcm7278[] = {
+ [RGR1_SW_INIT_1] = 0xc010,
+ [EXT_CFG_INDEX] = 0x9000,
+ [EXT_CFG_DATA] = 0x8000,
+ [PCIE_HARD_DEBUG] = 0x4204,
+ [PCIE_INTR2_CPU_BASE] = 0x4300,
+};
+
+static const int pcie_offsets_bcm7425[] = {
+ [RGR1_SW_INIT_1] = 0x8010,
+ [EXT_CFG_INDEX] = 0x8300,
+ [EXT_CFG_DATA] = 0x8304,
+ [PCIE_HARD_DEBUG] = 0x4204,
+ [PCIE_INTR2_CPU_BASE] = 0x4300,
};
-static const int pcie_offsets_bmips_7425[] = {
- [RGR1_SW_INIT_1] = 0x8010,
- [EXT_CFG_INDEX] = 0x8300,
- [EXT_CFG_DATA] = 0x8304,
+static const int pcie_offsets_bcm7712[] = {
+ [RGR1_SW_INIT_1] = 0x9210,
+ [EXT_CFG_INDEX] = 0x9000,
+ [EXT_CFG_DATA] = 0x8000,
+ [PCIE_HARD_DEBUG] = 0x4304,
+ [PCIE_INTR2_CPU_BASE] = 0x4400,
};
static const struct pcie_cfg_data generic_cfg = {
.offsets = pcie_offsets,
- .type = GENERIC,
+ .soc_base = GENERIC,
.perst_set = brcm_pcie_perst_set_generic,
.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+ .num_inbound_wins = 3,
};
-static const struct pcie_cfg_data bcm7425_cfg = {
- .offsets = pcie_offsets_bmips_7425,
- .type = BCM7425,
+static const struct pcie_cfg_data bcm2711_cfg = {
+ .offsets = pcie_offsets,
+ .soc_base = BCM2711,
.perst_set = brcm_pcie_perst_set_generic,
.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+ .num_inbound_wins = 3,
};
-static const struct pcie_cfg_data bcm7435_cfg = {
- .offsets = pcie_offsets,
- .type = BCM7435,
- .perst_set = brcm_pcie_perst_set_generic,
+static const struct pcie_cfg_data bcm2712_cfg = {
+ .offsets = pcie_offsets_bcm7712,
+ .soc_base = BCM7712,
+ .perst_set = brcm_pcie_perst_set_7278,
.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+ .post_setup = brcm_pcie_post_setup_bcm2712,
+ .quirks = CFG_QUIRK_AVOID_BRIDGE_SHUTDOWN,
+ .num_inbound_wins = 10,
};
static const struct pcie_cfg_data bcm4908_cfg = {
.offsets = pcie_offsets,
- .type = BCM4908,
+ .soc_base = BCM4908,
.perst_set = brcm_pcie_perst_set_4908,
.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
-};
-
-static const int pcie_offset_bcm7278[] = {
- [RGR1_SW_INIT_1] = 0xc010,
- [EXT_CFG_INDEX] = 0x9000,
- [EXT_CFG_DATA] = 0x9004,
+ .num_inbound_wins = 3,
};
static const struct pcie_cfg_data bcm7278_cfg = {
- .offsets = pcie_offset_bcm7278,
- .type = BCM7278,
+ .offsets = pcie_offsets_bcm7278,
+ .soc_base = BCM7278,
.perst_set = brcm_pcie_perst_set_7278,
.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_7278,
+ .num_inbound_wins = 3,
};
-static const struct pcie_cfg_data bcm2711_cfg = {
+static const struct pcie_cfg_data bcm7425_cfg = {
+ .offsets = pcie_offsets_bcm7425,
+ .soc_base = BCM7425,
+ .perst_set = brcm_pcie_perst_set_generic,
+ .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+ .num_inbound_wins = 3,
+};
+
+static const struct pcie_cfg_data bcm7435_cfg = {
.offsets = pcie_offsets,
- .type = BCM2711,
+ .soc_base = BCM7435,
.perst_set = brcm_pcie_perst_set_generic,
.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+ .num_inbound_wins = 3,
+};
+
+static const struct pcie_cfg_data bcm7216_cfg = {
+ .offsets = pcie_offsets_bcm7278,
+ .soc_base = BCM7278,
+ .perst_set = brcm_pcie_perst_set_7278,
+ .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_7278,
+ .has_phy = true,
+ .num_inbound_wins = 3,
+ .has_err_report = true,
+};
+
+static const struct pcie_cfg_data bcm7712_cfg = {
+ .offsets = pcie_offsets_bcm7712,
+ .perst_set = brcm_pcie_perst_set_7278,
+ .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+ .soc_base = BCM7712,
+ .num_inbound_wins = 10,
};
static const struct of_device_id brcm_pcie_match[] = {
{ .compatible = "brcm,bcm2711-pcie", .data = &bcm2711_cfg },
+ { .compatible = "brcm,bcm2712-pcie", .data = &bcm2712_cfg },
{ .compatible = "brcm,bcm4908-pcie", .data = &bcm4908_cfg },
{ .compatible = "brcm,bcm7211-pcie", .data = &generic_cfg },
+ { .compatible = "brcm,bcm7216-pcie", .data = &bcm7216_cfg },
{ .compatible = "brcm,bcm7278-pcie", .data = &bcm7278_cfg },
- { .compatible = "brcm,bcm7216-pcie", .data = &bcm7278_cfg },
- { .compatible = "brcm,bcm7445-pcie", .data = &generic_cfg },
- { .compatible = "brcm,bcm7435-pcie", .data = &bcm7435_cfg },
{ .compatible = "brcm,bcm7425-pcie", .data = &bcm7425_cfg },
+ { .compatible = "brcm,bcm7435-pcie", .data = &bcm7435_cfg },
+ { .compatible = "brcm,bcm7445-pcie", .data = &generic_cfg },
+ { .compatible = "brcm,bcm7712-pcie", .data = &bcm7712_cfg },
{},
};
@@ -1498,7 +2042,7 @@ static struct pci_ops brcm7425_pcie_ops = {
static int brcm_pcie_probe(struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.of_node, *msi_np;
+ struct device_node *np = pdev->dev.of_node;
struct pci_host_bridge *bridge;
const struct pcie_cfg_data *data;
struct brcm_pcie *pcie;
@@ -1517,10 +2061,7 @@ static int brcm_pcie_probe(struct platform_device *pdev)
pcie = pci_host_bridge_priv(bridge);
pcie->dev = &pdev->dev;
pcie->np = np;
- pcie->reg_offsets = data->offsets;
- pcie->type = data->type;
- pcie->perst_set = data->perst_set;
- pcie->bridge_sw_init_set = data->bridge_sw_init_set;
+ pcie->cfg = data;
pcie->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pcie->base))
@@ -1535,25 +2076,55 @@ static int brcm_pcie_probe(struct platform_device *pdev)
pcie->ssc = of_property_read_bool(np, "brcm,enable-ssc");
- ret = clk_prepare_enable(pcie->clk);
- if (ret) {
- dev_err(&pdev->dev, "could not enable clock\n");
- return ret;
- }
pcie->rescal = devm_reset_control_get_optional_shared(&pdev->dev, "rescal");
- if (IS_ERR(pcie->rescal)) {
- clk_disable_unprepare(pcie->clk);
+ if (IS_ERR(pcie->rescal))
return PTR_ERR(pcie->rescal);
- }
+
pcie->perst_reset = devm_reset_control_get_optional_exclusive(&pdev->dev, "perst");
- if (IS_ERR(pcie->perst_reset)) {
- clk_disable_unprepare(pcie->clk);
+ if (IS_ERR(pcie->perst_reset))
return PTR_ERR(pcie->perst_reset);
+
+ pcie->bridge_reset = devm_reset_control_get_optional_exclusive(&pdev->dev, "bridge");
+ if (IS_ERR(pcie->bridge_reset))
+ return PTR_ERR(pcie->bridge_reset);
+
+ pcie->swinit_reset = devm_reset_control_get_optional_exclusive(&pdev->dev, "swinit");
+ if (IS_ERR(pcie->swinit_reset))
+ return PTR_ERR(pcie->swinit_reset);
+
+ ret = clk_prepare_enable(pcie->clk);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "could not enable clock\n");
+
+ ret = brcm_pcie_bridge_sw_init_set(pcie, 0);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "could not de-assert bridge reset\n");
+
+ if (pcie->swinit_reset) {
+ ret = reset_control_assert(pcie->swinit_reset);
+ if (ret) {
+ clk_disable_unprepare(pcie->clk);
+ return dev_err_probe(&pdev->dev, ret,
+ "could not assert reset 'swinit'\n");
+ }
+
+ /* HW team recommends 1us for proper sync and propagation of reset */
+ udelay(1);
+
+ ret = reset_control_deassert(pcie->swinit_reset);
+ if (ret) {
+ clk_disable_unprepare(pcie->clk);
+ return dev_err_probe(&pdev->dev, ret,
+ "could not de-assert reset 'swinit'\n");
+ }
}
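The assert/1us delay/deassert sequence above amounts to a manual reset pulse. A hedged sketch of the same pattern as a helper (hypothetical; the code above keeps it inline so it can unwind the clock on each failure path):

    /* Sketch: pulse a reset line with the 1us settle time the
     * HW team recommends for 'swinit'.
     */
    static int brcm_pulse_reset(struct reset_control *rc)
    {
            int ret = reset_control_assert(rc);

            if (ret)
                    return ret;
            udelay(1);      /* let the reset sync and propagate */
            return reset_control_deassert(rc);
    }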
ret = reset_control_reset(pcie->rescal);
- if (ret)
- dev_err(&pdev->dev, "failed to deassert 'rescal'\n");
+ if (ret) {
+ clk_disable_unprepare(pcie->clk);
+ return dev_err_probe(&pdev->dev, ret, "failed to deassert 'rescal'\n");
+ }
ret = brcm_phy_start(pcie);
if (ret) {
@@ -1567,22 +2138,29 @@ static int brcm_pcie_probe(struct platform_device *pdev)
goto fail;
pcie->hw_rev = readl(pcie->base + PCIE_MISC_REVISION);
- if (pcie->type == BCM4908 && pcie->hw_rev >= BRCM_PCIE_HW_REV_3_20) {
+ if (pcie->cfg->soc_base == BCM4908 &&
+ pcie->hw_rev >= BRCM_PCIE_HW_REV_3_20) {
dev_err(pcie->dev, "hardware revision with unsupported PERST# setup\n");
ret = -ENODEV;
goto fail;
}
- msi_np = of_parse_phandle(pcie->np, "msi-parent", 0);
- if (pci_msi_enabled() && msi_np == pcie->np) {
- ret = brcm_pcie_enable_msi(pcie);
+ if (pci_msi_enabled()) {
+ struct device_node *msi_np = of_parse_phandle(pcie->np, "msi-parent", 0);
+
+ if (msi_np == pcie->np)
+ ret = brcm_pcie_enable_msi(pcie);
+
+ of_node_put(msi_np);
+
if (ret) {
dev_err(pcie->dev, "probe of internal MSI failed");
goto fail;
}
}
- bridge->ops = pcie->type == BCM7425 ? &brcm7425_pcie_ops : &brcm_pcie_ops;
+ bridge->ops = pcie->cfg->soc_base == BCM7425 ?
+ &brcm7425_pcie_ops : &brcm_pcie_ops;
bridge->sysdata = pcie;
platform_set_drvdata(pdev, pcie);
@@ -1596,10 +2174,16 @@ static int brcm_pcie_probe(struct platform_device *pdev)
return ret;
}
+ if (pcie->cfg->has_err_report) {
+ spin_lock_init(&pcie->bridge_lock);
+ brcm_register_die_notifiers(pcie);
+ }
+
return 0;
fail:
__brcm_pcie_remove(pcie);
+
return ret;
}
@@ -1624,3 +2208,4 @@ module_platform_driver(brcm_pcie_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom STB PCIe RC driver");
MODULE_AUTHOR("Broadcom");
+MODULE_SOFTDEP("pre: irq_bcm2712_mip");
diff --git a/drivers/pci/controller/pcie-hisi-error.c b/drivers/pci/controller/pcie-hisi-error.c
index 7959c9c8d2bc..aaf1ed2b6e59 100644
--- a/drivers/pci/controller/pcie-hisi-error.c
+++ b/drivers/pci/controller/pcie-hisi-error.c
@@ -299,13 +299,11 @@ static int hisi_pcie_error_handler_probe(struct platform_device *pdev)
return 0;
}
-static int hisi_pcie_error_handler_remove(struct platform_device *pdev)
+static void hisi_pcie_error_handler_remove(struct platform_device *pdev)
{
struct hisi_pcie_error_private *priv = platform_get_drvdata(pdev);
ghes_unregister_vendor_record_notifier(&priv->nb);
-
- return 0;
}
static const struct acpi_device_id hisi_pcie_acpi_match[] = {
@@ -324,4 +322,3 @@ static struct platform_driver hisi_pcie_error_handler_driver = {
module_platform_driver(hisi_pcie_error_handler_driver);
MODULE_DESCRIPTION("HiSilicon HIP PCIe controller error handling driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/pcie-iproc-msi.c b/drivers/pci/controller/pcie-iproc-msi.c
index fee036b07cd4..9ba242ab9596 100644
--- a/drivers/pci/controller/pcie-iproc-msi.c
+++ b/drivers/pci/controller/pcie-iproc-msi.c
@@ -5,6 +5,7 @@
#include <linux/interrupt.h>
#include <linux/irqchip/chained_irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/of_irq.h>
@@ -81,7 +82,6 @@ struct iproc_msi_grp {
* @bitmap_lock: lock to protect access to the MSI bitmap
* @nr_msi_vecs: total number of MSI vectors
* @inner_domain: inner IRQ domain
- * @msi_domain: MSI IRQ domain
* @nr_eq_region: required number of 4K aligned memory region for MSI event
* queues
* @nr_msi_region: required number of 4K aligned address region for MSI posted
@@ -101,7 +101,6 @@ struct iproc_msi {
struct mutex bitmap_lock;
unsigned int nr_msi_vecs;
struct irq_domain *inner_domain;
- struct irq_domain *msi_domain;
unsigned int nr_eq_region;
unsigned int nr_msi_region;
void *eq_cpu;
@@ -165,16 +164,18 @@ static inline unsigned int iproc_msi_eq_offset(struct iproc_msi *msi, u32 eq)
return eq * EQ_LEN * sizeof(u32);
}
-static struct irq_chip iproc_msi_irq_chip = {
- .name = "iProc-MSI",
+#define IPROC_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS)
+#define IPROC_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
+ MSI_FLAG_PCI_MSIX)
+
+static struct msi_parent_ops iproc_msi_parent_ops = {
+ .required_flags = IPROC_MSI_FLAGS_REQUIRED,
+ .supported_flags = IPROC_MSI_FLAGS_SUPPORTED,
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .prefix = "iProc-",
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
};
-
-static struct msi_domain_info iproc_msi_domain_info = {
- .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_PCI_MSIX,
- .chip = &iproc_msi_irq_chip,
-};
-
/*
* In iProc PCIe core, each MSI group is serviced by a GIC interrupt and a
* dedicated event queue. Each MSI group can support up to 64 MSI vectors.
@@ -446,27 +447,22 @@ static void iproc_msi_disable(struct iproc_msi *msi)
static int iproc_msi_alloc_domains(struct device_node *node,
struct iproc_msi *msi)
{
- msi->inner_domain = irq_domain_add_linear(NULL, msi->nr_msi_vecs,
- &msi_domain_ops, msi);
+ struct irq_domain_info info = {
+ .fwnode = of_fwnode_handle(node),
+ .ops = &msi_domain_ops,
+ .host_data = msi,
+ .size = msi->nr_msi_vecs,
+ };
+
+ msi->inner_domain = msi_create_parent_irq_domain(&info, &iproc_msi_parent_ops);
if (!msi->inner_domain)
return -ENOMEM;
- msi->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(node),
- &iproc_msi_domain_info,
- msi->inner_domain);
- if (!msi->msi_domain) {
- irq_domain_remove(msi->inner_domain);
- return -ENOMEM;
- }
-
return 0;
}
static void iproc_msi_free_domains(struct iproc_msi *msi)
{
- if (msi->msi_domain)
- irq_domain_remove(msi->msi_domain);
-
if (msi->inner_domain)
irq_domain_remove(msi->inner_domain);
}
@@ -525,7 +521,7 @@ int iproc_msi_init(struct iproc_pcie *pcie, struct device_node *node)
if (!of_device_is_compatible(node, "brcm,iproc-msi"))
return -ENODEV;
- if (!of_find_property(node, "msi-controller", NULL))
+ if (!of_property_read_bool(node, "msi-controller"))
return -ENODEV;
if (pcie->msi)
@@ -542,7 +538,7 @@ int iproc_msi_init(struct iproc_pcie *pcie, struct device_node *node)
msi->nr_cpus = num_possible_cpus();
if (msi->nr_cpus == 1)
- iproc_msi_domain_info.flags |= MSI_FLAG_MULTI_PCI_MSI;
+ iproc_msi_parent_ops.supported_flags |= MSI_FLAG_MULTI_PCI_MSI;
msi->nr_irqs = of_irq_count(node);
if (!msi->nr_irqs) {
@@ -585,8 +581,7 @@ int iproc_msi_init(struct iproc_pcie *pcie, struct device_node *node)
return -EINVAL;
}
- if (of_find_property(node, "brcm,pcie-msi-inten", NULL))
- msi->has_inten_reg = true;
+ msi->has_inten_reg = of_property_read_bool(node, "brcm,pcie-msi-inten");
msi->nr_msi_vecs = msi->nr_irqs * EQ_LEN;
msi->bitmap = devm_bitmap_zalloc(pcie->dev, msi->nr_msi_vecs,
diff --git a/drivers/pci/controller/pcie-iproc-platform.c b/drivers/pci/controller/pcie-iproc-platform.c
index 4142a73e611d..0cb78c583c7e 100644
--- a/drivers/pci/controller/pcie-iproc-platform.c
+++ b/drivers/pci/controller/pcie-iproc-platform.c
@@ -52,7 +52,7 @@ static int iproc_pltfm_pcie_probe(struct platform_device *pdev)
pcie = pci_host_bridge_priv(bridge);
pcie->dev = dev;
- pcie->type = (enum iproc_pcie_type) of_device_get_match_data(dev);
+ pcie->type = (uintptr_t)of_device_get_match_data(dev);
ret = of_address_to_resource(np, 0, &reg);
if (ret < 0) {
@@ -114,11 +114,11 @@ static int iproc_pltfm_pcie_probe(struct platform_device *pdev)
return 0;
}
-static int iproc_pltfm_pcie_remove(struct platform_device *pdev)
+static void iproc_pltfm_pcie_remove(struct platform_device *pdev)
{
struct iproc_pcie *pcie = platform_get_drvdata(pdev);
- return iproc_pcie_remove(pcie);
+ iproc_pcie_remove(pcie);
}
static void iproc_pltfm_pcie_shutdown(struct platform_device *pdev)
diff --git a/drivers/pci/controller/pcie-iproc.c b/drivers/pci/controller/pcie-iproc.c
index 83029bdfd884..ccf71993ea35 100644
--- a/drivers/pci/controller/pcie-iproc.c
+++ b/drivers/pci/controller/pcie-iproc.c
@@ -17,6 +17,7 @@
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/platform_device.h>
#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/phy/phy.h>
@@ -54,7 +55,7 @@
#define CFG_RD_SUCCESS 0
#define CFG_RD_UR 1
-#define CFG_RD_CRS 2
+#define CFG_RD_RRS 2
#define CFG_RD_CA 3
#define CFG_RETRY_STATUS 0xffff0001
#define CFG_RETRY_STATUS_TIMEOUT_US 500000 /* 500 milliseconds */
@@ -485,31 +486,31 @@ static unsigned int iproc_pcie_cfg_retry(struct iproc_pcie *pcie,
u32 status;
/*
- * As per PCIe spec r3.1, sec 2.3.2, CRS Software Visibility only
+ * As per PCIe r6.0, sec 2.3.2, Config RRS Software Visibility only
* affects config reads of the Vendor ID. For config writes or any
* other config reads, the Root may automatically reissue the
* configuration request again as a new request.
*
* For config reads, this hardware returns CFG_RETRY_STATUS data
- * when it receives a CRS completion, regardless of the address of
- * the read or the CRS Software Visibility Enable bit. As a
+ * when it receives a RRS completion, regardless of the address of
+ * the read or the RRS Software Visibility Enable bit. As a
* partial workaround for this, we retry in software any read that
* returns CFG_RETRY_STATUS.
*
* Note that a non-Vendor ID config register may have a value of
* CFG_RETRY_STATUS. If we read that, we can't distinguish it from
- * a CRS completion, so we will incorrectly retry the read and
+ * a RRS completion, so we will incorrectly retry the read and
* eventually return the wrong data (0xffffffff).
*/
data = readl(cfg_data_p);
while (data == CFG_RETRY_STATUS && timeout--) {
/*
- * CRS state is set in CFG_RD status register
+ * RRS state is set in CFG_RD status register
* This will handle the case where CFG_RETRY_STATUS is
* valid config data.
*/
status = iproc_pcie_read_reg(pcie, IPROC_PCIE_CFG_RD_STATUS);
- if (status != CFG_RD_CRS)
+ if (status != CFG_RD_RRS)
return data;
udelay(1);
@@ -556,8 +557,8 @@ static void iproc_pcie_fix_cap(struct iproc_pcie *pcie, int where, u32 *val)
break;
case IPROC_PCI_EXP_CAP + PCI_EXP_RTCTL:
- /* Don't advertise CRS SV support */
- *val &= ~(PCI_EXP_RTCAP_CRSVIS << 16);
+ /* Don't advertise RRS SV support */
+ *val &= ~(PCI_EXP_RTCAP_RRS_SV << 16);
break;
default:
@@ -783,7 +784,7 @@ static int iproc_pcie_check_link(struct iproc_pcie *pcie)
/* make sure we are not in EP mode */
iproc_pci_raw_config_read32(pcie, 0, PCI_HEADER_TYPE, 1, &hdr_type);
- if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE) {
+ if ((hdr_type & PCI_HEADER_TYPE_MASK) != PCI_HEADER_TYPE_BRIDGE) {
dev_err(dev, "in EP mode, hdr=%#02x\n", hdr_type);
return -EFAULT;
}
@@ -1337,29 +1338,16 @@ static int iproc_pcie_msi_steer(struct iproc_pcie *pcie,
static int iproc_pcie_msi_enable(struct iproc_pcie *pcie)
{
- struct device_node *msi_node;
+ struct device_node *msi_node = NULL;
int ret;
/*
* Either the "msi-parent" or the "msi-map" phandle needs to exist
* for us to obtain the MSI node.
*/
-
- msi_node = of_parse_phandle(pcie->dev->of_node, "msi-parent", 0);
- if (!msi_node) {
- const __be32 *msi_map = NULL;
- int len;
- u32 phandle;
-
- msi_map = of_get_property(pcie->dev->of_node, "msi-map", &len);
- if (!msi_map)
- return -ENODEV;
-
- phandle = be32_to_cpup(msi_map + 1);
- msi_node = of_find_node_by_phandle(phandle);
- if (!msi_node)
- return -ENODEV;
- }
+ of_msi_xlate(pcie->dev, &msi_node, 0);
+ if (!msi_node)
+ return -ENODEV;
/*
* Certain revisions of the iProc PCIe controller require additional
@@ -1537,7 +1525,7 @@ err_exit_phy:
}
EXPORT_SYMBOL(iproc_pcie_setup);
-int iproc_pcie_remove(struct iproc_pcie *pcie)
+void iproc_pcie_remove(struct iproc_pcie *pcie)
{
struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
@@ -1548,8 +1536,6 @@ int iproc_pcie_remove(struct iproc_pcie *pcie)
phy_power_off(pcie->phy);
phy_exit(pcie->phy);
-
- return 0;
}
EXPORT_SYMBOL(iproc_pcie_remove);
diff --git a/drivers/pci/controller/pcie-iproc.h b/drivers/pci/controller/pcie-iproc.h
index dcca315897c8..969ded03b8c2 100644
--- a/drivers/pci/controller/pcie-iproc.h
+++ b/drivers/pci/controller/pcie-iproc.h
@@ -111,7 +111,7 @@ struct iproc_pcie {
};
int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res);
-int iproc_pcie_remove(struct iproc_pcie *pcie);
+void iproc_pcie_remove(struct iproc_pcie *pcie);
int iproc_pcie_shutdown(struct iproc_pcie *pcie);
#ifdef CONFIG_PCIE_IPROC_MSI
diff --git a/drivers/pci/controller/pcie-mediatek-gen3.c b/drivers/pci/controller/pcie-mediatek-gen3.c
index b8612ce5f4d0..75ddb8bee168 100644
--- a/drivers/pci/controller/pcie-mediatek-gen3.c
+++ b/drivers/pci/controller/pcie-mediatek-gen3.c
@@ -6,29 +6,47 @@
* Author: Jianjun Wang <jianjun.wang@mediatek.com>
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/msi.h>
+#include <linux/of_device.h>
+#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
#include <linux/reset.h>
#include "../pci.h"
+#define PCIE_BASE_CFG_REG 0x14
+#define PCIE_BASE_CFG_SPEED GENMASK(15, 8)
+
#define PCIE_SETTING_REG 0x80
+#define PCIE_SETTING_LINK_WIDTH GENMASK(11, 8)
+#define PCIE_SETTING_GEN_SUPPORT GENMASK(14, 12)
#define PCIE_PCI_IDS_1 0x9c
#define PCI_CLASS(class) (class << 8)
#define PCIE_RC_MODE BIT(0)
+#define PCIE_EQ_PRESET_01_REG 0x100
+#define PCIE_VAL_LN0_DOWNSTREAM GENMASK(6, 0)
+#define PCIE_VAL_LN0_UPSTREAM GENMASK(14, 8)
+#define PCIE_VAL_LN1_DOWNSTREAM GENMASK(22, 16)
+#define PCIE_VAL_LN1_UPSTREAM GENMASK(30, 24)
+
#define PCIE_CFGNUM_REG 0x140
#define PCIE_CFG_DEVFN(devfn) ((devfn) & GENMASK(7, 0))
#define PCIE_CFG_BUS(bus) (((bus) << 8) & GENMASK(15, 8))
@@ -68,6 +86,14 @@
#define PCIE_MSI_SET_ENABLE_REG 0x190
#define PCIE_MSI_SET_ENABLE GENMASK(PCIE_MSI_SET_NUM - 1, 0)
+#define PCIE_PIPE4_PIE8_REG 0x338
+#define PCIE_K_FINETUNE_MAX GENMASK(5, 0)
+#define PCIE_K_FINETUNE_ERR GENMASK(7, 6)
+#define PCIE_K_PRESET_TO_USE GENMASK(18, 8)
+#define PCIE_K_PHYPARAM_QUERY BIT(19)
+#define PCIE_K_QUERY_TIMEOUT BIT(20)
+#define PCIE_K_PRESET_TO_USE_16G GENMASK(31, 21)
+
#define PCIE_MSI_SET_BASE_REG 0xc00
#define PCIE_MSI_SET_OFFSET 0x10
#define PCIE_MSI_SET_STATUS_OFFSET 0x04
@@ -76,6 +102,9 @@
#define PCIE_MSI_SET_ADDR_HI_BASE 0xc80
#define PCIE_MSI_SET_ADDR_HI_OFFSET 0x04
+#define PCIE_RESOURCE_CTRL_REG 0xd2c
+#define PCIE_RSRC_SYS_CLK_RDY_TIME_MASK GENMASK(7, 0)
+
#define PCIE_ICMD_PM_REG 0x198
#define PCIE_TURN_OFF_LINK BIT(4)
@@ -100,6 +129,42 @@
#define PCIE_ATR_TLP_TYPE_MEM PCIE_ATR_TLP_TYPE(0)
#define PCIE_ATR_TLP_TYPE_IO PCIE_ATR_TLP_TYPE(2)
+#define MAX_NUM_PHY_RESETS 3
+
+#define PCIE_MTK_RESET_TIME_US 10
+
+/* Time in ms needed to complete PCIe reset on EN7581 SoC */
+#define PCIE_EN7581_RESET_TIME_MS 100
+
+struct mtk_gen3_pcie;
+
+#define PCIE_CONF_LINK2_CTL_STS (PCIE_CFG_OFFSET_ADDR + 0xb0)
+#define PCIE_CONF_LINK2_LCR2_LINK_SPEED GENMASK(3, 0)
+
+enum mtk_gen3_pcie_flags {
+ SKIP_PCIE_RSTB = BIT(0), /* Skip PERST# assertion during device
+ * probing or suspend/resume phase to
+ * avoid hw bugs/issues.
+ */
+};
+
+/**
+ * struct mtk_gen3_pcie_pdata - differentiate between host generations
+ * @power_up: pcie power_up callback
+ * @phy_resets: phy reset lines SoC data.
+ * @sys_clk_rdy_time_us: System clock ready time override (microseconds)
+ * @flags: pcie device flags.
+ */
+struct mtk_gen3_pcie_pdata {
+ int (*power_up)(struct mtk_gen3_pcie *pcie);
+ struct {
+ const char *id[MAX_NUM_PHY_RESETS];
+ int num_resets;
+ } phy_resets;
+ u8 sys_clk_rdy_time_us;
+ u32 flags;
+};
+
/**
* struct mtk_msi_set - MSI information for each set
* @base: IO mapped register base
@@ -118,39 +183,44 @@ struct mtk_msi_set {
* @base: IO mapped register base
* @reg_base: physical register base
* @mac_reset: MAC reset control
- * @phy_reset: PHY reset control
+ * @phy_resets: PHY reset controllers
* @phy: PHY controller block
* @clks: PCIe clocks
* @num_clks: PCIe clocks count for this port
+ * @max_link_speed: Maximum link speed (PCIe Gen) for this port
+ * @num_lanes: Number of PCIe lanes for this port
* @irq: PCIe controller interrupt number
* @saved_irq_state: IRQ enable state saved at suspend time
* @irq_lock: lock protecting IRQ register access
* @intx_domain: legacy INTx IRQ domain
- * @msi_domain: MSI IRQ domain
* @msi_bottom_domain: MSI IRQ bottom domain
* @msi_sets: MSI sets information
* @lock: lock protecting IRQ bit map
* @msi_irq_in_use: bit map for assigned MSI IRQ
+ * @soc: pointer to SoC-dependent operations
*/
struct mtk_gen3_pcie {
struct device *dev;
void __iomem *base;
phys_addr_t reg_base;
struct reset_control *mac_reset;
- struct reset_control *phy_reset;
+ struct reset_control_bulk_data phy_resets[MAX_NUM_PHY_RESETS];
struct phy *phy;
struct clk_bulk_data *clks;
int num_clks;
+ u8 max_link_speed;
+ u8 num_lanes;
int irq;
u32 saved_irq_state;
raw_spinlock_t irq_lock;
struct irq_domain *intx_domain;
- struct irq_domain *msi_domain;
struct irq_domain *msi_bottom_domain;
struct mtk_msi_set msi_sets[PCIE_MSI_SET_NUM];
struct mutex lock;
DECLARE_BITMAP(msi_irq_in_use, PCIE_MSI_IRQS_NUM);
+
+ const struct mtk_gen3_pcie_pdata *soc;
};
/* LTSSM state in PCIE_LTSSM_STATUS_REG bit[28:24] */
@@ -245,35 +315,61 @@ static int mtk_pcie_set_trans_table(struct mtk_gen3_pcie *pcie,
resource_size_t cpu_addr,
resource_size_t pci_addr,
resource_size_t size,
- unsigned long type, int num)
+ unsigned long type, int *num)
{
+ resource_size_t remaining = size;
+ resource_size_t table_size;
+ resource_size_t addr_align;
+ const char *range_type;
void __iomem *table;
u32 val;
- if (num >= PCIE_MAX_TRANS_TABLES) {
- dev_err(pcie->dev, "not enough translate table for addr: %#llx, limited to [%d]\n",
- (unsigned long long)cpu_addr, PCIE_MAX_TRANS_TABLES);
- return -ENODEV;
- }
+ while (remaining && (*num < PCIE_MAX_TRANS_TABLES)) {
+ /* Table size needs to be a power of 2 */
+ table_size = BIT(fls(remaining) - 1);
+
+ if (cpu_addr > 0) {
+ addr_align = BIT(ffs(cpu_addr) - 1);
+ table_size = min(table_size, addr_align);
+ }
+
+ /* Minimum size of translate table is 4KiB */
+ if (table_size < 0x1000) {
+ dev_err(pcie->dev, "illegal table size %#llx\n",
+ (unsigned long long)table_size);
+ return -EINVAL;
+ }
- table = pcie->base + PCIE_TRANS_TABLE_BASE_REG +
- num * PCIE_ATR_TLB_SET_OFFSET;
+ table = pcie->base + PCIE_TRANS_TABLE_BASE_REG + *num * PCIE_ATR_TLB_SET_OFFSET;
+ writel_relaxed(lower_32_bits(cpu_addr) | PCIE_ATR_SIZE(fls(table_size) - 1), table);
+ writel_relaxed(upper_32_bits(cpu_addr), table + PCIE_ATR_SRC_ADDR_MSB_OFFSET);
+ writel_relaxed(lower_32_bits(pci_addr), table + PCIE_ATR_TRSL_ADDR_LSB_OFFSET);
+ writel_relaxed(upper_32_bits(pci_addr), table + PCIE_ATR_TRSL_ADDR_MSB_OFFSET);
- writel_relaxed(lower_32_bits(cpu_addr) | PCIE_ATR_SIZE(fls(size) - 1),
- table);
- writel_relaxed(upper_32_bits(cpu_addr),
- table + PCIE_ATR_SRC_ADDR_MSB_OFFSET);
- writel_relaxed(lower_32_bits(pci_addr),
- table + PCIE_ATR_TRSL_ADDR_LSB_OFFSET);
- writel_relaxed(upper_32_bits(pci_addr),
- table + PCIE_ATR_TRSL_ADDR_MSB_OFFSET);
+ if (type == IORESOURCE_IO) {
+ val = PCIE_ATR_TYPE_IO | PCIE_ATR_TLP_TYPE_IO;
+ range_type = "IO";
+ } else {
+ val = PCIE_ATR_TYPE_MEM | PCIE_ATR_TLP_TYPE_MEM;
+ range_type = "MEM";
+ }
- if (type == IORESOURCE_IO)
- val = PCIE_ATR_TYPE_IO | PCIE_ATR_TLP_TYPE_IO;
- else
- val = PCIE_ATR_TYPE_MEM | PCIE_ATR_TLP_TYPE_MEM;
+ writel_relaxed(val, table + PCIE_ATR_TRSL_PARAM_OFFSET);
- writel_relaxed(val, table + PCIE_ATR_TRSL_PARAM_OFFSET);
+ dev_dbg(pcie->dev, "set %s trans window[%d]: cpu_addr = %#llx, pci_addr = %#llx, size = %#llx\n",
+ range_type, *num, (unsigned long long)cpu_addr,
+ (unsigned long long)pci_addr,
+ (unsigned long long)table_size);
+
+ cpu_addr += table_size;
+ pci_addr += table_size;
+ remaining -= table_size;
+ (*num)++;
+ }
+
+ if (remaining)
+ dev_warn(pcie->dev, "not enough translate table for addr: %#llx, limited to [%d]\n",
+ (unsigned long long)cpu_addr, PCIE_MAX_TRANS_TABLES);
return 0;
}
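The loop above carves an arbitrary range into chunks that are power-of-two sized and naturally aligned to the running CPU address. A standalone sketch of one iteration's size choice, mirroring the fls()/ffs() math:

    /* Sketch: largest chunk that is a power of two, no bigger than
     * what remains, and aligned to cpu_addr. For cpu_addr = 0x6000
     * and remaining = 0x5000 this picks 0x2000.
     */
    static resource_size_t mtk_next_chunk(resource_size_t cpu_addr,
                                          resource_size_t remaining)
    {
            resource_size_t size = BIT(fls(remaining) - 1);

            if (cpu_addr > 0)
                    size = min_t(resource_size_t, size,
                                 BIT(ffs(cpu_addr) - 1));
            return size;
    }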
@@ -315,11 +411,43 @@ static int mtk_pcie_startup_port(struct mtk_gen3_pcie *pcie)
int err;
u32 val;
- /* Set as RC mode */
+ /* Set as RC mode and set controller PCIe Gen speed restriction, if any */
val = readl_relaxed(pcie->base + PCIE_SETTING_REG);
val |= PCIE_RC_MODE;
+ if (pcie->max_link_speed) {
+ val &= ~PCIE_SETTING_GEN_SUPPORT;
+
+ /* Can enable link speed support only from Gen2 onwards */
+ if (pcie->max_link_speed >= 2)
+ val |= FIELD_PREP(PCIE_SETTING_GEN_SUPPORT,
+ GENMASK(pcie->max_link_speed - 2, 0));
+ }
+ if (pcie->num_lanes) {
+ val &= ~PCIE_SETTING_LINK_WIDTH;
+
+		/* Zero means one lane; each set bit enables x2/x4/x8/x16 */
+ if (pcie->num_lanes > 1)
+ val |= FIELD_PREP(PCIE_SETTING_LINK_WIDTH,
+ GENMASK(fls(pcie->num_lanes >> 2), 0));
+ }
writel_relaxed(val, pcie->base + PCIE_SETTING_REG);
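The two FIELD_PREP() encodings above pack one bit per supported step beyond the minimum: speeds above Gen1 and widths above x1. Worked out as hypothetical helpers mirroring the in-line math:

    /* Sketch: Gen3 -> GENMASK(1, 0) = 0x3, advertising Gen2 and Gen3 */
    static u32 mtk_gen_support_bits(u8 max_link_speed)
    {
            return max_link_speed >= 2 ?
                   GENMASK(max_link_speed - 2, 0) : 0;
    }

    /* Sketch: 4 lanes -> fls(4 >> 2) = 1 -> GENMASK(1, 0) = 0x3,
     * i.e. the x2 and x4 bits; 2 lanes -> just the x2 bit.
     */
    static u32 mtk_link_width_bits(u8 num_lanes)
    {
            return num_lanes > 1 ?
                   GENMASK(fls(num_lanes >> 2), 0) : 0;
    }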
+ /* Set Link Control 2 (LNKCTL2) speed restriction, if any */
+ if (pcie->max_link_speed) {
+ val = readl_relaxed(pcie->base + PCIE_CONF_LINK2_CTL_STS);
+ val &= ~PCIE_CONF_LINK2_LCR2_LINK_SPEED;
+ val |= FIELD_PREP(PCIE_CONF_LINK2_LCR2_LINK_SPEED, pcie->max_link_speed);
+ writel_relaxed(val, pcie->base + PCIE_CONF_LINK2_CTL_STS);
+ }
+
+ /* If parameter is present, adjust SYS_CLK_RDY_TIME to avoid glitching */
+ if (pcie->soc->sys_clk_rdy_time_us) {
+ val = readl_relaxed(pcie->base + PCIE_RESOURCE_CTRL_REG);
+ FIELD_MODIFY(PCIE_RSRC_SYS_CLK_RDY_TIME_MASK, &val,
+ pcie->soc->sys_clk_rdy_time_us);
+ writel_relaxed(val, pcie->base + PCIE_RESOURCE_CTRL_REG);
+ }
+
/* Set class code */
val = readl_relaxed(pcie->base + PCIE_PCI_IDS_1);
val &= ~GENMASK(31, 8);
@@ -336,22 +464,33 @@ static int mtk_pcie_startup_port(struct mtk_gen3_pcie *pcie)
val |= PCIE_DISABLE_DVFSRC_VLT_REQ;
writel_relaxed(val, pcie->base + PCIE_MISC_CTRL_REG);
- /* Assert all reset signals */
- val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG);
- val |= PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | PCIE_PE_RSTB;
- writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
-
/*
- * Described in PCIe CEM specification sections 2.2 (PERST# Signal)
- * and 2.2.1 (Initial Power-Up (G3 to S0)).
- * The deassertion of PERST# should be delayed 100ms (TPVPERL)
- * for the power and clock to become stable.
+ * Airoha EN7581 has a hw bug asserting/releasing PCIE_PE_RSTB signal
+ * causing occasional PCIe link down. In order to overcome the issue,
+ * PCIE_RSTB signals are not asserted/released at this stage and the
+ * PCIe block is reset using en7523_reset_assert() and
+ * en7581_pci_enable().
*/
- msleep(100);
-
- /* De-assert reset signals */
- val &= ~(PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | PCIE_PE_RSTB);
- writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
+ if (!(pcie->soc->flags & SKIP_PCIE_RSTB)) {
+ /* Assert all reset signals */
+ val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG);
+ val |= PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB |
+ PCIE_PE_RSTB;
+ writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
+
+ /*
+ * Described in PCIe CEM specification revision 6.0.
+ *
+ * The deassertion of PERST# should be delayed 100ms (TPVPERL)
+ * for the power and clock to become stable.
+ */
+ msleep(PCIE_T_PVPERL_MS);
+
+ /* De-assert reset signals */
+ val &= ~(PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB |
+ PCIE_PE_RSTB);
+ writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
+ }
/* Check if the link is up or not */
err = readl_poll_timeout(pcie->base + PCIE_LINK_STATUS_REG, val,
@@ -380,64 +519,41 @@ static int mtk_pcie_startup_port(struct mtk_gen3_pcie *pcie)
resource_size_t cpu_addr;
resource_size_t pci_addr;
resource_size_t size;
- const char *range_type;
- if (type == IORESOURCE_IO) {
+ if (type == IORESOURCE_IO)
cpu_addr = pci_pio_to_address(res->start);
- range_type = "IO";
- } else if (type == IORESOURCE_MEM) {
+ else if (type == IORESOURCE_MEM)
cpu_addr = res->start;
- range_type = "MEM";
- } else {
+ else
continue;
- }
pci_addr = res->start - entry->offset;
size = resource_size(res);
err = mtk_pcie_set_trans_table(pcie, cpu_addr, pci_addr, size,
- type, table_index);
+ type, &table_index);
if (err)
return err;
-
- dev_dbg(pcie->dev, "set %s trans window[%d]: cpu_addr = %#llx, pci_addr = %#llx, size = %#llx\n",
- range_type, table_index, (unsigned long long)cpu_addr,
- (unsigned long long)pci_addr, (unsigned long long)size);
-
- table_index++;
}
return 0;
}
-static int mtk_pcie_set_affinity(struct irq_data *data,
- const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
-static void mtk_pcie_msi_irq_mask(struct irq_data *data)
-{
- pci_msi_mask_irq(data);
- irq_chip_mask_parent(data);
-}
-
-static void mtk_pcie_msi_irq_unmask(struct irq_data *data)
-{
- pci_msi_unmask_irq(data);
- irq_chip_unmask_parent(data);
-}
-
-static struct irq_chip mtk_msi_irq_chip = {
- .irq_ack = irq_chip_ack_parent,
- .irq_mask = mtk_pcie_msi_irq_mask,
- .irq_unmask = mtk_pcie_msi_irq_unmask,
- .name = "MSI",
-};
-
-static struct msi_domain_info mtk_msi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
- .chip = &mtk_msi_irq_chip,
+#define MTK_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_NO_AFFINITY | \
+ MSI_FLAG_PCI_MSI_MASK_PARENT)
+
+#define MTK_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
+ MSI_FLAG_PCI_MSIX | \
+ MSI_FLAG_MULTI_PCI_MSI)
+
+static const struct msi_parent_ops mtk_msi_parent_ops = {
+ .required_flags = MTK_MSI_FLAGS_REQUIRED,
+ .supported_flags = MTK_MSI_FLAGS_SUPPORTED,
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .chip_flags = MSI_CHIP_FLAG_SET_ACK,
+ .prefix = "MTK3-",
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
};
static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
@@ -502,7 +618,6 @@ static struct irq_chip mtk_msi_bottom_irq_chip = {
.irq_mask = mtk_msi_bottom_irq_mask,
.irq_unmask = mtk_msi_bottom_irq_unmask,
.irq_compose_msi_msg = mtk_compose_msi_msg,
- .irq_set_affinity = mtk_pcie_set_affinity,
.name = "MSI",
};
@@ -603,7 +718,6 @@ static struct irq_chip mtk_intx_irq_chip = {
.irq_mask = mtk_intx_mask,
.irq_unmask = mtk_intx_unmask,
.irq_eoi = mtk_intx_eoi,
- .irq_set_affinity = mtk_pcie_set_affinity,
.name = "INTx",
};
@@ -635,8 +749,8 @@ static int mtk_pcie_init_irq_domains(struct mtk_gen3_pcie *pcie)
return -ENODEV;
}
- pcie->intx_domain = irq_domain_add_linear(intc_node, PCI_NUM_INTX,
- &intx_domain_ops, pcie);
+ pcie->intx_domain = irq_domain_create_linear(of_fwnode_handle(intc_node), PCI_NUM_INTX,
+ &intx_domain_ops, pcie);
if (!pcie->intx_domain) {
dev_err(dev, "failed to create INTx IRQ domain\n");
ret = -ENODEV;
@@ -646,28 +760,23 @@ static int mtk_pcie_init_irq_domains(struct mtk_gen3_pcie *pcie)
/* Setup MSI */
mutex_init(&pcie->lock);
- pcie->msi_bottom_domain = irq_domain_add_linear(node, PCIE_MSI_IRQS_NUM,
- &mtk_msi_bottom_domain_ops, pcie);
+ struct irq_domain_info info = {
+ .fwnode = dev_fwnode(dev),
+ .ops = &mtk_msi_bottom_domain_ops,
+ .host_data = pcie,
+ .size = PCIE_MSI_IRQS_NUM,
+ };
+
+ pcie->msi_bottom_domain = msi_create_parent_irq_domain(&info, &mtk_msi_parent_ops);
if (!pcie->msi_bottom_domain) {
dev_err(dev, "failed to create MSI bottom domain\n");
ret = -ENODEV;
goto err_msi_bottom_domain;
}
- pcie->msi_domain = pci_msi_create_irq_domain(dev->fwnode,
- &mtk_msi_domain_info,
- pcie->msi_bottom_domain);
- if (!pcie->msi_domain) {
- dev_err(dev, "failed to create MSI domain\n");
- ret = -ENODEV;
- goto err_msi_domain;
- }
-
of_node_put(intc_node);
return 0;
-err_msi_domain:
- irq_domain_remove(pcie->msi_bottom_domain);
err_msi_bottom_domain:
irq_domain_remove(pcie->intx_domain);
out_put_node:
@@ -682,9 +791,6 @@ static void mtk_pcie_irq_teardown(struct mtk_gen3_pcie *pcie)
if (pcie->intx_domain)
irq_domain_remove(pcie->intx_domain);
- if (pcie->msi_domain)
- irq_domain_remove(pcie->msi_domain);
-
if (pcie->msi_bottom_domain)
irq_domain_remove(pcie->msi_bottom_domain);
@@ -760,10 +866,11 @@ static int mtk_pcie_setup_irq(struct mtk_gen3_pcie *pcie)
static int mtk_pcie_parse_port(struct mtk_gen3_pcie *pcie)
{
+ int i, ret, num_resets = pcie->soc->phy_resets.num_resets;
struct device *dev = pcie->dev;
struct platform_device *pdev = to_platform_device(dev);
struct resource *regs;
- int ret;
+ u32 num_lanes;
regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcie-mac");
if (!regs)
@@ -776,12 +883,13 @@ static int mtk_pcie_parse_port(struct mtk_gen3_pcie *pcie)
pcie->reg_base = regs->start;
- pcie->phy_reset = devm_reset_control_get_optional_exclusive(dev, "phy");
- if (IS_ERR(pcie->phy_reset)) {
- ret = PTR_ERR(pcie->phy_reset);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to get PHY reset\n");
+ for (i = 0; i < num_resets; i++)
+ pcie->phy_resets[i].id = pcie->soc->phy_resets.id[i];
+ ret = devm_reset_control_bulk_get_optional_shared(dev, num_resets,
+ pcie->phy_resets);
+ if (ret) {
+ dev_err(dev, "failed to get PHY bulk reset\n");
return ret;
}
@@ -809,16 +917,151 @@ static int mtk_pcie_parse_port(struct mtk_gen3_pcie *pcie)
return pcie->num_clks;
}
+ ret = of_property_read_u32(dev->of_node, "num-lanes", &num_lanes);
+ if (ret == 0) {
+ if (num_lanes == 0 || num_lanes > 16 ||
+ (num_lanes != 1 && num_lanes % 2))
+ dev_warn(dev, "invalid num-lanes, using controller defaults\n");
+ else
+ pcie->num_lanes = num_lanes;
+ }
+
return 0;
}
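
The num-lanes bounds above accept x1 or any even width up to x16; anything else
falls back to the controller defaults with a warning. The same predicate,
restated standalone (a sketch mirroring the check, not a statement of what the
hardware actually supports):

	static bool sketch_num_lanes_valid(u32 num_lanes)
	{
		if (num_lanes == 0 || num_lanes > 16)
			return false;
		/* x1 is fine; wider links must be an even number of lanes */
		return num_lanes == 1 || !(num_lanes % 2);
	}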
+static int mtk_pcie_en7581_power_up(struct mtk_gen3_pcie *pcie)
+{
+ struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
+ struct device *dev = pcie->dev;
+ struct resource_entry *entry;
+ struct regmap *pbus_regmap;
+ u32 val, args[2], size;
+ resource_size_t addr;
+ int err;
+
+ /*
+ * The controller may have been left out of reset by the bootloader
+ * so make sure that we get a clean start by asserting resets here.
+ */
+ reset_control_bulk_assert(pcie->soc->phy_resets.num_resets,
+ pcie->phy_resets);
+
+ /* Wait for the time needed to complete the assertion of the reset lines. */
+ msleep(PCIE_EN7581_RESET_TIME_MS);
+
+ /*
+ * Configure PBus base address and base address mask to allow the
+ * hw to detect if a given address is accessible on the PCIe
+ * controller.
+ */
+ pbus_regmap = syscon_regmap_lookup_by_phandle_args(dev->of_node,
+ "mediatek,pbus-csr",
+ ARRAY_SIZE(args),
+ args);
+ if (IS_ERR(pbus_regmap))
+ return PTR_ERR(pbus_regmap);
+
+ entry = resource_list_first_type(&host->windows, IORESOURCE_MEM);
+ if (!entry)
+ return -ENODEV;
+
+ addr = entry->res->start - entry->offset;
+ regmap_write(pbus_regmap, args[0], lower_32_bits(addr));
+ size = lower_32_bits(resource_size(entry->res));
+ regmap_write(pbus_regmap, args[1], GENMASK(31, __fls(size)));
+
+ /*
+ * Unlike the other MediaTek Gen3 controllers, the Airoha EN7581
+ * requires PHY initialization and power-on before PHY reset deassert.
+ */
+ err = phy_init(pcie->phy);
+ if (err) {
+ dev_err(dev, "failed to initialize PHY\n");
+ return err;
+ }
+
+ err = phy_power_on(pcie->phy);
+ if (err) {
+ dev_err(dev, "failed to power on PHY\n");
+ goto err_phy_on;
+ }
+
+ err = reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets,
+ pcie->phy_resets);
+ if (err) {
+ dev_err(dev, "failed to deassert PHYs\n");
+ goto err_phy_deassert;
+ }
+
+ /*
+ * Wait for the time needed to complete the bulk de-assert above.
+ * This delay is specific to the EN7581 SoC.
+ */
+ msleep(PCIE_EN7581_RESET_TIME_MS);
+
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
+
+ val = FIELD_PREP(PCIE_VAL_LN0_DOWNSTREAM, 0x47) |
+ FIELD_PREP(PCIE_VAL_LN1_DOWNSTREAM, 0x47) |
+ FIELD_PREP(PCIE_VAL_LN0_UPSTREAM, 0x41) |
+ FIELD_PREP(PCIE_VAL_LN1_UPSTREAM, 0x41);
+ writel_relaxed(val, pcie->base + PCIE_EQ_PRESET_01_REG);
+
+ val = PCIE_K_PHYPARAM_QUERY | PCIE_K_QUERY_TIMEOUT |
+ FIELD_PREP(PCIE_K_PRESET_TO_USE_16G, 0x80) |
+ FIELD_PREP(PCIE_K_PRESET_TO_USE, 0x2) |
+ FIELD_PREP(PCIE_K_FINETUNE_MAX, 0xf);
+ writel_relaxed(val, pcie->base + PCIE_PIPE4_PIE8_REG);
+
+ err = clk_bulk_prepare_enable(pcie->num_clks, pcie->clks);
+ if (err) {
+ dev_err(dev, "failed to prepare clock\n");
+ goto err_clk_prepare_enable;
+ }
+
+ /*
+ * Airoha EN7581 performs the PCIe reset via clk callbacks since it has
+ * a hw issue with the PCIE_PE_RSTB signal. Wait for the time needed to
+ * complete the PCIe reset.
+ */
+ msleep(PCIE_T_PVPERL_MS);
+
+ return 0;
+
+err_clk_prepare_enable:
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+ reset_control_bulk_assert(pcie->soc->phy_resets.num_resets,
+ pcie->phy_resets);
+err_phy_deassert:
+ phy_power_off(pcie->phy);
+err_phy_on:
+ phy_exit(pcie->phy);
+
+ return err;
+}
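
The PBus window programming above derives the address mask from the window size
with GENMASK(31, __fls(size)). A worked sketch of that derivation, assuming (as
the code does) a power-of-two-sized window:

	/*
	 * Example: a 512 MiB window has size = 0x20000000, __fls(size) = 29,
	 * so the mask is GENMASK(31, 29) = 0xe0000000 and the hw compares
	 * only the top three address bits against the programmed base.
	 */
	static u32 sketch_pbus_mask(u32 size)
	{
		return GENMASK(31, __fls(size));
	}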
+
static int mtk_pcie_power_up(struct mtk_gen3_pcie *pcie)
{
struct device *dev = pcie->dev;
int err;
+ /*
+ * The controller may have been left out of reset by the bootloader
+ * so make sure that we get a clean start by asserting resets here.
+ */
+ reset_control_bulk_assert(pcie->soc->phy_resets.num_resets,
+ pcie->phy_resets);
+ reset_control_assert(pcie->mac_reset);
+ usleep_range(PCIE_MTK_RESET_TIME_US, 2 * PCIE_MTK_RESET_TIME_US);
+
/* PHY power on and enable pipe clock */
- reset_control_deassert(pcie->phy_reset);
+ err = reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets,
+ pcie->phy_resets);
+ if (err) {
+ dev_err(dev, "failed to deassert PHYs\n");
+ return err;
+ }
err = phy_init(pcie->phy);
if (err) {
@@ -854,7 +1097,8 @@ err_clk_init:
err_phy_on:
phy_exit(pcie->phy);
err_phy_init:
- reset_control_assert(pcie->phy_reset);
+ reset_control_bulk_assert(pcie->soc->phy_resets.num_resets,
+ pcie->phy_resets);
return err;
}
@@ -869,30 +1113,56 @@ static void mtk_pcie_power_down(struct mtk_gen3_pcie *pcie)
phy_power_off(pcie->phy);
phy_exit(pcie->phy);
- reset_control_assert(pcie->phy_reset);
+ reset_control_bulk_assert(pcie->soc->phy_resets.num_resets,
+ pcie->phy_resets);
+}
+
+static int mtk_pcie_get_controller_max_link_speed(struct mtk_gen3_pcie *pcie)
+{
+ u32 val;
+ int ret;
+
+ val = readl_relaxed(pcie->base + PCIE_BASE_CFG_REG);
+ val = FIELD_GET(PCIE_BASE_CFG_SPEED, val);
+ ret = fls(val);
+
+ return ret > 0 ? ret : -EINVAL;
}
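
PCIE_BASE_CFG_SPEED reads back as a bitmask of supported link generations, so
fls() maps the highest set bit to a Gen number. A tiny worked decode with
illustrative values:

	/*
	 * A speed field of 0b0111 advertises Gen1..Gen3 and fls(0x7) == 3,
	 * so the controller tops out at Gen3; an empty field decodes to
	 * -EINVAL, matching mtk_pcie_get_controller_max_link_speed() above.
	 */
	static int sketch_decode_max_gen(u32 speed_bits)
	{
		return speed_bits ? fls(speed_bits) : -EINVAL;
	}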
static int mtk_pcie_setup(struct mtk_gen3_pcie *pcie)
{
- int err;
+ int err, max_speed;
err = mtk_pcie_parse_port(pcie);
if (err)
return err;
/*
- * The controller may have been left out of reset by the bootloader
- * so make sure that we get a clean start by asserting resets here.
+ * Deassert the lines here to avoid unbalancing the deassert_count of
+ * the shared bulk reset controls.
*/
- reset_control_assert(pcie->phy_reset);
- reset_control_assert(pcie->mac_reset);
- usleep_range(10, 20);
+ reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets,
+ pcie->phy_resets);
/* Don't touch the hardware registers before power up */
- err = mtk_pcie_power_up(pcie);
+ err = pcie->soc->power_up(pcie);
if (err)
return err;
+ err = of_pci_get_max_link_speed(pcie->dev->of_node);
+ if (err) {
+ /* Get the maximum speed supported by the controller */
+ max_speed = mtk_pcie_get_controller_max_link_speed(pcie);
+
+ /* Set max_link_speed only if the controller supports it */
+ if (max_speed >= 0 && max_speed <= err) {
+ pcie->max_link_speed = err;
+ dev_info(pcie->dev,
+ "maximum controller link speed Gen%d, overriding to Gen%u",
+ max_speed, pcie->max_link_speed);
+ }
+ }
+
/* Try link up */
err = mtk_pcie_startup_port(pcie);
if (err)
@@ -924,6 +1194,7 @@ static int mtk_pcie_probe(struct platform_device *pdev)
pcie = pci_host_bridge_priv(host);
pcie->dev = dev;
+ pcie->soc = device_get_match_data(dev);
platform_set_drvdata(pdev, pcie);
err = mtk_pcie_setup(pcie);
@@ -943,7 +1214,7 @@ static int mtk_pcie_probe(struct platform_device *pdev)
return 0;
}
-static int mtk_pcie_remove(struct platform_device *pdev)
+static void mtk_pcie_remove(struct platform_device *pdev)
{
struct mtk_gen3_pcie *pcie = platform_get_drvdata(pdev);
struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
@@ -955,8 +1226,6 @@ static int mtk_pcie_remove(struct platform_device *pdev)
mtk_pcie_irq_teardown(pcie);
mtk_pcie_power_down(pcie);
-
- return 0;
}
static void mtk_pcie_irq_save(struct mtk_gen3_pcie *pcie)
@@ -1023,10 +1292,12 @@ static int mtk_pcie_suspend_noirq(struct device *dev)
return err;
}
- /* Pull down the PERST# pin */
- val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG);
- val |= PCIE_PE_RSTB;
- writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
+ if (!(pcie->soc->flags & SKIP_PCIE_RSTB)) {
+ /* Assert the PERST# pin */
+ val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG);
+ val |= PCIE_PE_RSTB;
+ writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
+ }
 dev_dbg(pcie->dev, "entered L2 state successfully");
@@ -1041,7 +1312,7 @@ static int mtk_pcie_resume_noirq(struct device *dev)
struct mtk_gen3_pcie *pcie = dev_get_drvdata(dev);
int err;
- err = mtk_pcie_power_up(pcie);
+ err = pcie->soc->power_up(pcie);
if (err)
return err;
@@ -1061,8 +1332,38 @@ static const struct dev_pm_ops mtk_pcie_pm_ops = {
mtk_pcie_resume_noirq)
};
+static const struct mtk_gen3_pcie_pdata mtk_pcie_soc_mt8192 = {
+ .power_up = mtk_pcie_power_up,
+ .phy_resets = {
+ .id[0] = "phy",
+ .num_resets = 1,
+ },
+};
+
+static const struct mtk_gen3_pcie_pdata mtk_pcie_soc_mt8196 = {
+ .power_up = mtk_pcie_power_up,
+ .phy_resets = {
+ .id[0] = "phy",
+ .num_resets = 1,
+ },
+ .sys_clk_rdy_time_us = 10,
+};
+
+static const struct mtk_gen3_pcie_pdata mtk_pcie_soc_en7581 = {
+ .power_up = mtk_pcie_en7581_power_up,
+ .phy_resets = {
+ .id[0] = "phy-lane0",
+ .id[1] = "phy-lane1",
+ .id[2] = "phy-lane2",
+ .num_resets = 3,
+ },
+ .flags = SKIP_PCIE_RSTB,
+};
+
static const struct of_device_id mtk_pcie_of_match[] = {
- { .compatible = "mediatek,mt8192-pcie" },
+ { .compatible = "airoha,en7581-pcie", .data = &mtk_pcie_soc_en7581 },
+ { .compatible = "mediatek,mt8192-pcie", .data = &mtk_pcie_soc_mt8192 },
+ { .compatible = "mediatek,mt8196-pcie", .data = &mtk_pcie_soc_mt8196 },
{},
};
MODULE_DEVICE_TABLE(of, mtk_pcie_of_match);
@@ -1074,8 +1375,10 @@ static struct platform_driver mtk_pcie_driver = {
.name = "mtk-pcie-gen3",
.of_match_table = mtk_pcie_of_match,
.pm = &mtk_pcie_pm_ops,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
};
module_platform_driver(mtk_pcie_driver);
+MODULE_DESCRIPTION("MediaTek Gen3 PCIe host controller driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c
index ae5ad05ddc1d..4b78b6528f9f 100644
--- a/drivers/pci/controller/pcie-mediatek.c
+++ b/drivers/pci/controller/pcie-mediatek.c
@@ -12,6 +12,7 @@
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
@@ -142,23 +143,33 @@
struct mtk_pcie_port;
/**
+ * enum mtk_pcie_quirks - MTK PCIe quirks
+ * @MTK_PCIE_FIX_CLASS_ID: host's class ID needs to be fixed
+ * @MTK_PCIE_FIX_DEVICE_ID: host's device ID needs to be fixed
+ * @MTK_PCIE_NO_MSI: Bridge has no MSI support, and relies on an external block
+ * @MTK_PCIE_SKIP_RSTB: Skip toggling the RSTB bits on PCIe probe
+ */
+enum mtk_pcie_quirks {
+ MTK_PCIE_FIX_CLASS_ID = BIT(0),
+ MTK_PCIE_FIX_DEVICE_ID = BIT(1),
+ MTK_PCIE_NO_MSI = BIT(2),
+ MTK_PCIE_SKIP_RSTB = BIT(3),
+};
+
+/**
* struct mtk_pcie_soc - differentiate between host generations
- * @need_fix_class_id: whether this host's class ID needed to be fixed or not
- * @need_fix_device_id: whether this host's device ID needed to be fixed or not
- * @no_msi: Bridge has no MSI support, and relies on an external block
 * @device_id: device ID used when this host's device ID needs to be fixed
* @ops: pointer to configuration access functions
* @startup: pointer to controller setting functions
* @setup_irq: pointer to initialize IRQ functions
+ * @quirks: PCIe device quirks.
*/
struct mtk_pcie_soc {
- bool need_fix_class_id;
- bool need_fix_device_id;
- bool no_msi;
unsigned int device_id;
struct pci_ops *ops;
int (*startup)(struct mtk_pcie_port *port);
int (*setup_irq)(struct mtk_pcie_port *port, struct device_node *node);
+ enum mtk_pcie_quirks quirks;
};
/**
@@ -180,7 +191,6 @@ struct mtk_pcie_soc {
* @irq: GIC irq
* @irq_domain: legacy INTx IRQ domain
* @inner_domain: inner IRQ domain
- * @msi_domain: MSI IRQ domain
* @lock: protect the msi_irq_in_use bitmap
* @msi_irq_in_use: bit map for assigned MSI IRQ
*/
@@ -200,7 +210,6 @@ struct mtk_pcie_port {
int irq;
struct irq_domain *irq_domain;
struct irq_domain *inner_domain;
- struct irq_domain *msi_domain;
struct mutex lock;
DECLARE_BITMAP(msi_irq_in_use, MTK_MSI_IRQS_NUM);
};
@@ -211,7 +220,6 @@ struct mtk_pcie_port {
* @base: IO mapped register base
* @cfg: IO mapped register map for PCIe config
* @free_ck: free-run reference clock
- * @mem: non-prefetchable memory resource
* @ports: pointer to PCIe port information
* @soc: pointer to SoC-dependent operations
*/
@@ -407,12 +415,6 @@ static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
(int)data->hwirq, msg->address_hi, msg->address_lo);
}
-static int mtk_msi_set_affinity(struct irq_data *irq_data,
- const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
static void mtk_msi_ack_irq(struct irq_data *data)
{
struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
@@ -424,7 +426,6 @@ static void mtk_msi_ack_irq(struct irq_data *data)
static struct irq_chip mtk_msi_bottom_irq_chip = {
.name = "MTK MSI",
.irq_compose_msi_msg = mtk_compose_msi_msg,
- .irq_set_affinity = mtk_msi_set_affinity,
.irq_ack = mtk_msi_ack_irq,
};
@@ -478,40 +479,39 @@ static const struct irq_domain_ops msi_domain_ops = {
.free = mtk_pcie_irq_domain_free,
};
-static struct irq_chip mtk_msi_irq_chip = {
- .name = "MTK PCIe MSI",
- .irq_ack = irq_chip_ack_parent,
- .irq_mask = pci_msi_mask_irq,
- .irq_unmask = pci_msi_unmask_irq,
-};
+#define MTK_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_NO_AFFINITY)
+
+#define MTK_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
+ MSI_FLAG_PCI_MSIX)
-static struct msi_domain_info mtk_msi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_PCI_MSIX),
- .chip = &mtk_msi_irq_chip,
+static const struct msi_parent_ops mtk_msi_parent_ops = {
+ .required_flags = MTK_MSI_FLAGS_REQUIRED,
+ .supported_flags = MTK_MSI_FLAGS_SUPPORTED,
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .chip_flags = MSI_CHIP_FLAG_SET_ACK,
+ .prefix = "MTK-",
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
};
static int mtk_pcie_allocate_msi_domains(struct mtk_pcie_port *port)
{
- struct fwnode_handle *fwnode = of_node_to_fwnode(port->pcie->dev->of_node);
-
mutex_init(&port->lock);
- port->inner_domain = irq_domain_create_linear(fwnode, MTK_MSI_IRQS_NUM,
- &msi_domain_ops, port);
+ struct irq_domain_info info = {
+ .fwnode = dev_fwnode(port->pcie->dev),
+ .ops = &msi_domain_ops,
+ .host_data = port,
+ .size = MTK_MSI_IRQS_NUM,
+ };
+
+ port->inner_domain = msi_create_parent_irq_domain(&info, &mtk_msi_parent_ops);
if (!port->inner_domain) {
dev_err(port->pcie->dev, "failed to create IRQ domain\n");
return -ENOMEM;
}
- port->msi_domain = pci_msi_create_irq_domain(fwnode, &mtk_msi_domain_info,
- port->inner_domain);
- if (!port->msi_domain) {
- dev_err(port->pcie->dev, "failed to create MSI domain\n");
- irq_domain_remove(port->inner_domain);
- return -ENOMEM;
- }
-
return 0;
}
@@ -540,8 +540,6 @@ static void mtk_pcie_irq_teardown(struct mtk_pcie *pcie)
irq_domain_remove(port->irq_domain);
if (IS_ENABLED(CONFIG_PCI_MSI)) {
- if (port->msi_domain)
- irq_domain_remove(port->msi_domain);
if (port->inner_domain)
irq_domain_remove(port->inner_domain);
}
@@ -577,8 +575,8 @@ static int mtk_pcie_init_irq_domain(struct mtk_pcie_port *port,
return -ENODEV;
}
- port->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
- &intx_domain_ops, port);
+ port->irq_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), PCI_NUM_INTX,
+ &intx_domain_ops, port);
of_node_put(pcie_intc_node);
if (!port->irq_domain) {
dev_err(dev, "failed to get INTx IRQ domain\n");
@@ -617,12 +615,18 @@ static void mtk_pcie_intr_handler(struct irq_desc *desc)
 if (status & MSI_STATUS) {
unsigned long imsi_status;
+ /*
+ * The interrupt status can be cleared even if the
+ * MSI status remains pending. As such, given the
+ * edge-triggered interrupt type, its status should
+ * be cleared before being dispatched to the
+ * handler of the underlying device.
+ */
+ writel(MSI_STATUS, port->base + PCIE_INT_STATUS);
while ((imsi_status = readl(port->base + PCIE_IMSI_STATUS))) {
for_each_set_bit(bit, &imsi_status, MTK_MSI_IRQS_NUM)
generic_handle_domain_irq(port->inner_domain, bit);
}
- /* Clear MSI interrupt status */
- writel(MSI_STATUS, port->base + PCIE_INT_STATUS);
}
}
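
Moving the MSI_STATUS write ahead of the IMSI scan closes a lost-interrupt
window. A timeline sketch of the race the old order permitted (a hypothetical
interleaving, not a captured trace):

	/*
	 * Old order (clear MSI_STATUS last):
	 *
	 *   CPU                              endpoint
	 *   read IMSI_STATUS -> bit 0 set
	 *   handle MSI 0
	 *                                    signals MSI 1: sets IMSI bit 1
	 *                                    and re-latches MSI_STATUS
	 *   write MSI_STATUS (acks latch)
	 *
	 * IMSI bit 1 is still pending, but the edge latch is gone and no new
	 * interrupt fires. Clearing MSI_STATUS before the scan re-arms the
	 * latch, and the while loop re-reads IMSI_STATUS until it is empty.
	 */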
@@ -643,7 +647,7 @@ static int mtk_pcie_setup_irq(struct mtk_pcie_port *port,
return err;
}
- if (of_find_property(dev->of_node, "interrupt-names", NULL))
+ if (of_property_present(dev->of_node, "interrupt-names"))
port->irq = platform_get_irq_byname(pdev, "pcie_irq");
else
port->irq = platform_get_irq(pdev, port->slot);
@@ -685,31 +689,28 @@ static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port)
regmap_update_bits(pcie->cfg, PCIE_SYS_CFG_V2, val, val);
}
- /* Assert all reset signals */
- writel(0, port->base + PCIE_RST_CTRL);
+ if (!(soc->quirks & MTK_PCIE_SKIP_RSTB)) {
+ /* Assert all reset signals */
+ writel(0, port->base + PCIE_RST_CTRL);
- /*
- * Enable PCIe link down reset, if link status changed from link up to
- * link down, this will reset MAC control registers and configuration
- * space.
- */
- writel(PCIE_LINKDOWN_RST_EN, port->base + PCIE_RST_CTRL);
+ /*
+ * Enable PCIe link down reset, if link status changed from
+ * link up to link down, this will reset MAC control registers
+ * and configuration space.
+ */
+ writel(PCIE_LINKDOWN_RST_EN, port->base + PCIE_RST_CTRL);
- /*
- * Described in PCIe CEM specification sections 2.2 (PERST# Signal) and
- * 2.2.1 (Initial Power-Up (G3 to S0)). The deassertion of PERST# should
- * be delayed 100ms (TPVPERL) for the power and clock to become stable.
- */
- msleep(100);
+ msleep(PCIE_T_PVPERL_MS);
- /* De-assert PHY, PE, PIPE, MAC and configuration reset */
- val = readl(port->base + PCIE_RST_CTRL);
- val |= PCIE_PHY_RSTB | PCIE_PERSTB | PCIE_PIPE_SRSTB |
- PCIE_MAC_SRSTB | PCIE_CRSTB;
- writel(val, port->base + PCIE_RST_CTRL);
+ /* De-assert PHY, PE, PIPE, MAC and configuration reset */
+ val = readl(port->base + PCIE_RST_CTRL);
+ val |= PCIE_PHY_RSTB | PCIE_PERSTB | PCIE_PIPE_SRSTB |
+ PCIE_MAC_SRSTB | PCIE_CRSTB;
+ writel(val, port->base + PCIE_RST_CTRL);
+ }
/* Set up vendor ID and class code */
- if (soc->need_fix_class_id) {
+ if (soc->quirks & MTK_PCIE_FIX_CLASS_ID) {
val = PCI_VENDOR_ID_MEDIATEK;
writew(val, port->base + PCIE_CONF_VEND_ID);
@@ -717,7 +718,7 @@ static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port)
writew(val, port->base + PCIE_CONF_CLASS_ID);
}
- if (soc->need_fix_device_id)
+ if (soc->quirks & MTK_PCIE_FIX_DEVICE_ID)
writew(soc->device_id, port->base + PCIE_CONF_DEVICE_ID);
/* 100ms timeout value should be enough for Gen1/2 training */
@@ -827,6 +828,41 @@ static int mtk_pcie_startup_port(struct mtk_pcie_port *port)
return 0;
}
+static int mtk_pcie_startup_port_an7583(struct mtk_pcie_port *port)
+{
+ struct mtk_pcie *pcie = port->pcie;
+ struct device *dev = pcie->dev;
+ struct pci_host_bridge *host;
+ struct resource_entry *entry;
+ struct regmap *pbus_regmap;
+ resource_size_t addr;
+ u32 args[2], size;
+
+ /*
+ * Configure PBus base address and base address mask to allow
+ * the hw to detect if a given address is accessible on the PCIe
+ * controller.
+ */
+ pbus_regmap = syscon_regmap_lookup_by_phandle_args(dev->of_node,
+ "mediatek,pbus-csr",
+ ARRAY_SIZE(args),
+ args);
+ if (IS_ERR(pbus_regmap))
+ return PTR_ERR(pbus_regmap);
+
+ host = pci_host_bridge_from_priv(pcie);
+ entry = resource_list_first_type(&host->windows, IORESOURCE_MEM);
+ if (!entry)
+ return -ENODEV;
+
+ addr = entry->res->start - entry->offset;
+ regmap_write(pbus_regmap, args[0], lower_32_bits(addr));
+ size = lower_32_bits(resource_size(entry->res));
+ regmap_write(pbus_regmap, args[1], GENMASK(31, __fls(size)));
+
+ return mtk_pcie_startup_port_v2(port);
+}
+
static void mtk_pcie_enable_port(struct mtk_pcie_port *port)
{
struct mtk_pcie *pcie = port->pcie;
@@ -1043,24 +1079,22 @@ err_free_ck:
static int mtk_pcie_setup(struct mtk_pcie *pcie)
{
struct device *dev = pcie->dev;
- struct device_node *node = dev->of_node, *child;
+ struct device_node *node = dev->of_node;
struct mtk_pcie_port *port, *tmp;
int err, slot;
slot = of_get_pci_domain_nr(dev->of_node);
if (slot < 0) {
- for_each_available_child_of_node(node, child) {
+ for_each_available_child_of_node_scoped(node, child) {
err = of_pci_get_devfn(child);
- if (err < 0) {
- dev_err(dev, "failed to get devfn: %d\n", err);
- goto error_put_node;
- }
+ if (err < 0)
+ return dev_err_probe(dev, err, "failed to get devfn\n");
slot = PCI_SLOT(err);
err = mtk_pcie_parse_port(pcie, child, slot);
if (err)
- goto error_put_node;
+ return err;
}
} else {
err = mtk_pcie_parse_port(pcie, node, slot);
@@ -1081,9 +1115,6 @@ static int mtk_pcie_setup(struct mtk_pcie *pcie)
mtk_pcie_subsys_powerdown(pcie);
return 0;
-error_put_node:
- of_node_put(child);
- return err;
}
static int mtk_pcie_probe(struct platform_device *pdev)
@@ -1110,7 +1141,7 @@ static int mtk_pcie_probe(struct platform_device *pdev)
host->ops = pcie->soc->ops;
host->sysdata = pcie;
- host->msi_domain = pcie->soc->no_msi;
+ host->msi_domain = !!(pcie->soc->quirks & MTK_PCIE_NO_MSI);
err = pci_host_probe(host);
if (err)
@@ -1134,7 +1165,7 @@ static void mtk_pcie_free_resources(struct mtk_pcie *pcie)
pci_free_resource_list(windows);
}
-static int mtk_pcie_remove(struct platform_device *pdev)
+static void mtk_pcie_remove(struct platform_device *pdev)
{
struct mtk_pcie *pcie = platform_get_drvdata(pdev);
struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
@@ -1146,8 +1177,6 @@ static int mtk_pcie_remove(struct platform_device *pdev)
mtk_pcie_irq_teardown(pcie);
mtk_pcie_put_resources(pcie);
-
- return 0;
}
static int mtk_pcie_suspend_noirq(struct device *dev)
@@ -1200,9 +1229,9 @@ static const struct dev_pm_ops mtk_pcie_pm_ops = {
};
static const struct mtk_pcie_soc mtk_pcie_soc_v1 = {
- .no_msi = true,
.ops = &mtk_pcie_ops,
.startup = mtk_pcie_startup_port,
+ .quirks = MTK_PCIE_NO_MSI,
};
static const struct mtk_pcie_soc mtk_pcie_soc_mt2712 = {
@@ -1212,22 +1241,29 @@ static const struct mtk_pcie_soc mtk_pcie_soc_mt2712 = {
};
static const struct mtk_pcie_soc mtk_pcie_soc_mt7622 = {
- .need_fix_class_id = true,
.ops = &mtk_pcie_ops_v2,
.startup = mtk_pcie_startup_port_v2,
.setup_irq = mtk_pcie_setup_irq,
+ .quirks = MTK_PCIE_FIX_CLASS_ID,
+};
+
+static const struct mtk_pcie_soc mtk_pcie_soc_an7583 = {
+ .ops = &mtk_pcie_ops_v2,
+ .startup = mtk_pcie_startup_port_an7583,
+ .setup_irq = mtk_pcie_setup_irq,
+ .quirks = MTK_PCIE_FIX_CLASS_ID | MTK_PCIE_SKIP_RSTB,
};
static const struct mtk_pcie_soc mtk_pcie_soc_mt7629 = {
- .need_fix_class_id = true,
- .need_fix_device_id = true,
.device_id = PCI_DEVICE_ID_MEDIATEK_7629,
.ops = &mtk_pcie_ops_v2,
.startup = mtk_pcie_startup_port_v2,
.setup_irq = mtk_pcie_setup_irq,
+ .quirks = MTK_PCIE_FIX_CLASS_ID | MTK_PCIE_FIX_DEVICE_ID,
};
static const struct of_device_id mtk_pcie_ids[] = {
+ { .compatible = "airoha,an7583-pcie", .data = &mtk_pcie_soc_an7583 },
{ .compatible = "mediatek,mt2701-pcie", .data = &mtk_pcie_soc_v1 },
{ .compatible = "mediatek,mt7623-pcie", .data = &mtk_pcie_soc_v1 },
{ .compatible = "mediatek,mt2712-pcie", .data = &mtk_pcie_soc_mt2712 },
@@ -1248,4 +1284,5 @@ static struct platform_driver mtk_pcie_driver = {
},
};
module_platform_driver(mtk_pcie_driver);
+MODULE_DESCRIPTION("MediaTek PCIe host controller driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/pcie-microchip-host.c b/drivers/pci/controller/pcie-microchip-host.c
deleted file mode 100644
index 0ebf7015e9af..000000000000
--- a/drivers/pci/controller/pcie-microchip-host.c
+++ /dev/null
@@ -1,1140 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Microchip AXI PCIe Bridge host controller driver
- *
- * Copyright (c) 2018 - 2020 Microchip Corporation. All rights reserved.
- *
- * Author: Daire McNamara <daire.mcnamara@microchip.com>
- */
-
-#include <linux/clk.h>
-#include <linux/irqchip/chained_irq.h>
-#include <linux/irqdomain.h>
-#include <linux/module.h>
-#include <linux/msi.h>
-#include <linux/of_address.h>
-#include <linux/of_pci.h>
-#include <linux/pci-ecam.h>
-#include <linux/platform_device.h>
-
-#include "../pci.h"
-
-/* Number of MSI IRQs */
-#define MC_NUM_MSI_IRQS 32
-#define MC_NUM_MSI_IRQS_CODED 5
-
-/* PCIe Bridge Phy and Controller Phy offsets */
-#define MC_PCIE1_BRIDGE_ADDR 0x00008000u
-#define MC_PCIE1_CTRL_ADDR 0x0000a000u
-
-#define MC_PCIE_BRIDGE_ADDR (MC_PCIE1_BRIDGE_ADDR)
-#define MC_PCIE_CTRL_ADDR (MC_PCIE1_CTRL_ADDR)
-
-/* PCIe Controller Phy Regs */
-#define SEC_ERROR_CNT 0x20
-#define DED_ERROR_CNT 0x24
-#define SEC_ERROR_INT 0x28
-#define SEC_ERROR_INT_TX_RAM_SEC_ERR_INT GENMASK(3, 0)
-#define SEC_ERROR_INT_RX_RAM_SEC_ERR_INT GENMASK(7, 4)
-#define SEC_ERROR_INT_PCIE2AXI_RAM_SEC_ERR_INT GENMASK(11, 8)
-#define SEC_ERROR_INT_AXI2PCIE_RAM_SEC_ERR_INT GENMASK(15, 12)
-#define NUM_SEC_ERROR_INTS (4)
-#define SEC_ERROR_INT_MASK 0x2c
-#define DED_ERROR_INT 0x30
-#define DED_ERROR_INT_TX_RAM_DED_ERR_INT GENMASK(3, 0)
-#define DED_ERROR_INT_RX_RAM_DED_ERR_INT GENMASK(7, 4)
-#define DED_ERROR_INT_PCIE2AXI_RAM_DED_ERR_INT GENMASK(11, 8)
-#define DED_ERROR_INT_AXI2PCIE_RAM_DED_ERR_INT GENMASK(15, 12)
-#define NUM_DED_ERROR_INTS (4)
-#define DED_ERROR_INT_MASK 0x34
-#define ECC_CONTROL 0x38
-#define ECC_CONTROL_TX_RAM_INJ_ERROR_0 BIT(0)
-#define ECC_CONTROL_TX_RAM_INJ_ERROR_1 BIT(1)
-#define ECC_CONTROL_TX_RAM_INJ_ERROR_2 BIT(2)
-#define ECC_CONTROL_TX_RAM_INJ_ERROR_3 BIT(3)
-#define ECC_CONTROL_RX_RAM_INJ_ERROR_0 BIT(4)
-#define ECC_CONTROL_RX_RAM_INJ_ERROR_1 BIT(5)
-#define ECC_CONTROL_RX_RAM_INJ_ERROR_2 BIT(6)
-#define ECC_CONTROL_RX_RAM_INJ_ERROR_3 BIT(7)
-#define ECC_CONTROL_PCIE2AXI_RAM_INJ_ERROR_0 BIT(8)
-#define ECC_CONTROL_PCIE2AXI_RAM_INJ_ERROR_1 BIT(9)
-#define ECC_CONTROL_PCIE2AXI_RAM_INJ_ERROR_2 BIT(10)
-#define ECC_CONTROL_PCIE2AXI_RAM_INJ_ERROR_3 BIT(11)
-#define ECC_CONTROL_AXI2PCIE_RAM_INJ_ERROR_0 BIT(12)
-#define ECC_CONTROL_AXI2PCIE_RAM_INJ_ERROR_1 BIT(13)
-#define ECC_CONTROL_AXI2PCIE_RAM_INJ_ERROR_2 BIT(14)
-#define ECC_CONTROL_AXI2PCIE_RAM_INJ_ERROR_3 BIT(15)
-#define ECC_CONTROL_TX_RAM_ECC_BYPASS BIT(24)
-#define ECC_CONTROL_RX_RAM_ECC_BYPASS BIT(25)
-#define ECC_CONTROL_PCIE2AXI_RAM_ECC_BYPASS BIT(26)
-#define ECC_CONTROL_AXI2PCIE_RAM_ECC_BYPASS BIT(27)
-#define LTSSM_STATE 0x5c
-#define LTSSM_L0_STATE 0x10
-#define PCIE_EVENT_INT 0x14c
-#define PCIE_EVENT_INT_L2_EXIT_INT BIT(0)
-#define PCIE_EVENT_INT_HOTRST_EXIT_INT BIT(1)
-#define PCIE_EVENT_INT_DLUP_EXIT_INT BIT(2)
-#define PCIE_EVENT_INT_MASK GENMASK(2, 0)
-#define PCIE_EVENT_INT_L2_EXIT_INT_MASK BIT(16)
-#define PCIE_EVENT_INT_HOTRST_EXIT_INT_MASK BIT(17)
-#define PCIE_EVENT_INT_DLUP_EXIT_INT_MASK BIT(18)
-#define PCIE_EVENT_INT_ENB_MASK GENMASK(18, 16)
-#define PCIE_EVENT_INT_ENB_SHIFT 16
-#define NUM_PCIE_EVENTS (3)
-
-/* PCIe Bridge Phy Regs */
-#define PCIE_PCI_IDS_DW1 0x9c
-
-/* PCIe Config space MSI capability structure */
-#define MC_MSI_CAP_CTRL_OFFSET 0xe0u
-#define MC_MSI_MAX_Q_AVAIL (MC_NUM_MSI_IRQS_CODED << 1)
-#define MC_MSI_Q_SIZE (MC_NUM_MSI_IRQS_CODED << 4)
-
-#define IMASK_LOCAL 0x180
-#define DMA_END_ENGINE_0_MASK 0x00000000u
-#define DMA_END_ENGINE_0_SHIFT 0
-#define DMA_END_ENGINE_1_MASK 0x00000000u
-#define DMA_END_ENGINE_1_SHIFT 1
-#define DMA_ERROR_ENGINE_0_MASK 0x00000100u
-#define DMA_ERROR_ENGINE_0_SHIFT 8
-#define DMA_ERROR_ENGINE_1_MASK 0x00000200u
-#define DMA_ERROR_ENGINE_1_SHIFT 9
-#define A_ATR_EVT_POST_ERR_MASK 0x00010000u
-#define A_ATR_EVT_POST_ERR_SHIFT 16
-#define A_ATR_EVT_FETCH_ERR_MASK 0x00020000u
-#define A_ATR_EVT_FETCH_ERR_SHIFT 17
-#define A_ATR_EVT_DISCARD_ERR_MASK 0x00040000u
-#define A_ATR_EVT_DISCARD_ERR_SHIFT 18
-#define A_ATR_EVT_DOORBELL_MASK 0x00000000u
-#define A_ATR_EVT_DOORBELL_SHIFT 19
-#define P_ATR_EVT_POST_ERR_MASK 0x00100000u
-#define P_ATR_EVT_POST_ERR_SHIFT 20
-#define P_ATR_EVT_FETCH_ERR_MASK 0x00200000u
-#define P_ATR_EVT_FETCH_ERR_SHIFT 21
-#define P_ATR_EVT_DISCARD_ERR_MASK 0x00400000u
-#define P_ATR_EVT_DISCARD_ERR_SHIFT 22
-#define P_ATR_EVT_DOORBELL_MASK 0x00000000u
-#define P_ATR_EVT_DOORBELL_SHIFT 23
-#define PM_MSI_INT_INTA_MASK 0x01000000u
-#define PM_MSI_INT_INTA_SHIFT 24
-#define PM_MSI_INT_INTB_MASK 0x02000000u
-#define PM_MSI_INT_INTB_SHIFT 25
-#define PM_MSI_INT_INTC_MASK 0x04000000u
-#define PM_MSI_INT_INTC_SHIFT 26
-#define PM_MSI_INT_INTD_MASK 0x08000000u
-#define PM_MSI_INT_INTD_SHIFT 27
-#define PM_MSI_INT_INTX_MASK 0x0f000000u
-#define PM_MSI_INT_INTX_SHIFT 24
-#define PM_MSI_INT_MSI_MASK 0x10000000u
-#define PM_MSI_INT_MSI_SHIFT 28
-#define PM_MSI_INT_AER_EVT_MASK 0x20000000u
-#define PM_MSI_INT_AER_EVT_SHIFT 29
-#define PM_MSI_INT_EVENTS_MASK 0x40000000u
-#define PM_MSI_INT_EVENTS_SHIFT 30
-#define PM_MSI_INT_SYS_ERR_MASK 0x80000000u
-#define PM_MSI_INT_SYS_ERR_SHIFT 31
-#define NUM_LOCAL_EVENTS 15
-#define ISTATUS_LOCAL 0x184
-#define IMASK_HOST 0x188
-#define ISTATUS_HOST 0x18c
-#define MSI_ADDR 0x190
-#define ISTATUS_MSI 0x194
-
-/* PCIe Master table init defines */
-#define ATR0_PCIE_WIN0_SRCADDR_PARAM 0x600u
-#define ATR0_PCIE_ATR_SIZE 0x25
-#define ATR0_PCIE_ATR_SIZE_SHIFT 1
-#define ATR0_PCIE_WIN0_SRC_ADDR 0x604u
-#define ATR0_PCIE_WIN0_TRSL_ADDR_LSB 0x608u
-#define ATR0_PCIE_WIN0_TRSL_ADDR_UDW 0x60cu
-#define ATR0_PCIE_WIN0_TRSL_PARAM 0x610u
-
-/* PCIe AXI slave table init defines */
-#define ATR0_AXI4_SLV0_SRCADDR_PARAM 0x800u
-#define ATR_SIZE_SHIFT 1
-#define ATR_IMPL_ENABLE 1
-#define ATR0_AXI4_SLV0_SRC_ADDR 0x804u
-#define ATR0_AXI4_SLV0_TRSL_ADDR_LSB 0x808u
-#define ATR0_AXI4_SLV0_TRSL_ADDR_UDW 0x80cu
-#define ATR0_AXI4_SLV0_TRSL_PARAM 0x810u
-#define PCIE_TX_RX_INTERFACE 0x00000000u
-#define PCIE_CONFIG_INTERFACE 0x00000001u
-
-#define ATR_ENTRY_SIZE 32
-
-#define EVENT_PCIE_L2_EXIT 0
-#define EVENT_PCIE_HOTRST_EXIT 1
-#define EVENT_PCIE_DLUP_EXIT 2
-#define EVENT_SEC_TX_RAM_SEC_ERR 3
-#define EVENT_SEC_RX_RAM_SEC_ERR 4
-#define EVENT_SEC_AXI2PCIE_RAM_SEC_ERR 5
-#define EVENT_SEC_PCIE2AXI_RAM_SEC_ERR 6
-#define EVENT_DED_TX_RAM_DED_ERR 7
-#define EVENT_DED_RX_RAM_DED_ERR 8
-#define EVENT_DED_AXI2PCIE_RAM_DED_ERR 9
-#define EVENT_DED_PCIE2AXI_RAM_DED_ERR 10
-#define EVENT_LOCAL_DMA_END_ENGINE_0 11
-#define EVENT_LOCAL_DMA_END_ENGINE_1 12
-#define EVENT_LOCAL_DMA_ERROR_ENGINE_0 13
-#define EVENT_LOCAL_DMA_ERROR_ENGINE_1 14
-#define EVENT_LOCAL_A_ATR_EVT_POST_ERR 15
-#define EVENT_LOCAL_A_ATR_EVT_FETCH_ERR 16
-#define EVENT_LOCAL_A_ATR_EVT_DISCARD_ERR 17
-#define EVENT_LOCAL_A_ATR_EVT_DOORBELL 18
-#define EVENT_LOCAL_P_ATR_EVT_POST_ERR 19
-#define EVENT_LOCAL_P_ATR_EVT_FETCH_ERR 20
-#define EVENT_LOCAL_P_ATR_EVT_DISCARD_ERR 21
-#define EVENT_LOCAL_P_ATR_EVT_DOORBELL 22
-#define EVENT_LOCAL_PM_MSI_INT_INTX 23
-#define EVENT_LOCAL_PM_MSI_INT_MSI 24
-#define EVENT_LOCAL_PM_MSI_INT_AER_EVT 25
-#define EVENT_LOCAL_PM_MSI_INT_EVENTS 26
-#define EVENT_LOCAL_PM_MSI_INT_SYS_ERR 27
-#define NUM_EVENTS 28
-
-#define PCIE_EVENT_CAUSE(x, s) \
- [EVENT_PCIE_ ## x] = { __stringify(x), s }
-
-#define SEC_ERROR_CAUSE(x, s) \
- [EVENT_SEC_ ## x] = { __stringify(x), s }
-
-#define DED_ERROR_CAUSE(x, s) \
- [EVENT_DED_ ## x] = { __stringify(x), s }
-
-#define LOCAL_EVENT_CAUSE(x, s) \
- [EVENT_LOCAL_ ## x] = { __stringify(x), s }
-
-#define PCIE_EVENT(x) \
- .base = MC_PCIE_CTRL_ADDR, \
- .offset = PCIE_EVENT_INT, \
- .mask_offset = PCIE_EVENT_INT, \
- .mask_high = 1, \
- .mask = PCIE_EVENT_INT_ ## x ## _INT, \
- .enb_mask = PCIE_EVENT_INT_ENB_MASK
-
-#define SEC_EVENT(x) \
- .base = MC_PCIE_CTRL_ADDR, \
- .offset = SEC_ERROR_INT, \
- .mask_offset = SEC_ERROR_INT_MASK, \
- .mask = SEC_ERROR_INT_ ## x ## _INT, \
- .mask_high = 1, \
- .enb_mask = 0
-
-#define DED_EVENT(x) \
- .base = MC_PCIE_CTRL_ADDR, \
- .offset = DED_ERROR_INT, \
- .mask_offset = DED_ERROR_INT_MASK, \
- .mask_high = 1, \
- .mask = DED_ERROR_INT_ ## x ## _INT, \
- .enb_mask = 0
-
-#define LOCAL_EVENT(x) \
- .base = MC_PCIE_BRIDGE_ADDR, \
- .offset = ISTATUS_LOCAL, \
- .mask_offset = IMASK_LOCAL, \
- .mask_high = 0, \
- .mask = x ## _MASK, \
- .enb_mask = 0
-
-#define PCIE_EVENT_TO_EVENT_MAP(x) \
- { PCIE_EVENT_INT_ ## x ## _INT, EVENT_PCIE_ ## x }
-
-#define SEC_ERROR_TO_EVENT_MAP(x) \
- { SEC_ERROR_INT_ ## x ## _INT, EVENT_SEC_ ## x }
-
-#define DED_ERROR_TO_EVENT_MAP(x) \
- { DED_ERROR_INT_ ## x ## _INT, EVENT_DED_ ## x }
-
-#define LOCAL_STATUS_TO_EVENT_MAP(x) \
- { x ## _MASK, EVENT_LOCAL_ ## x }
-
-struct event_map {
- u32 reg_mask;
- u32 event_bit;
-};
-
-struct mc_msi {
- struct mutex lock; /* Protect used bitmap */
- struct irq_domain *msi_domain;
- struct irq_domain *dev_domain;
- u32 num_vectors;
- u64 vector_phy;
- DECLARE_BITMAP(used, MC_NUM_MSI_IRQS);
-};
-
-struct mc_pcie {
- void __iomem *axi_base_addr;
- struct device *dev;
- struct irq_domain *intx_domain;
- struct irq_domain *event_domain;
- raw_spinlock_t lock;
- struct mc_msi msi;
-};
-
-struct cause {
- const char *sym;
- const char *str;
-};
-
-static const struct cause event_cause[NUM_EVENTS] = {
- PCIE_EVENT_CAUSE(L2_EXIT, "L2 exit event"),
- PCIE_EVENT_CAUSE(HOTRST_EXIT, "Hot reset exit event"),
- PCIE_EVENT_CAUSE(DLUP_EXIT, "DLUP exit event"),
- SEC_ERROR_CAUSE(TX_RAM_SEC_ERR, "sec error in tx buffer"),
- SEC_ERROR_CAUSE(RX_RAM_SEC_ERR, "sec error in rx buffer"),
- SEC_ERROR_CAUSE(PCIE2AXI_RAM_SEC_ERR, "sec error in pcie2axi buffer"),
- SEC_ERROR_CAUSE(AXI2PCIE_RAM_SEC_ERR, "sec error in axi2pcie buffer"),
- DED_ERROR_CAUSE(TX_RAM_DED_ERR, "ded error in tx buffer"),
- DED_ERROR_CAUSE(RX_RAM_DED_ERR, "ded error in rx buffer"),
- DED_ERROR_CAUSE(PCIE2AXI_RAM_DED_ERR, "ded error in pcie2axi buffer"),
- DED_ERROR_CAUSE(AXI2PCIE_RAM_DED_ERR, "ded error in axi2pcie buffer"),
- LOCAL_EVENT_CAUSE(DMA_ERROR_ENGINE_0, "dma engine 0 error"),
- LOCAL_EVENT_CAUSE(DMA_ERROR_ENGINE_1, "dma engine 1 error"),
- LOCAL_EVENT_CAUSE(A_ATR_EVT_POST_ERR, "axi write request error"),
- LOCAL_EVENT_CAUSE(A_ATR_EVT_FETCH_ERR, "axi read request error"),
- LOCAL_EVENT_CAUSE(A_ATR_EVT_DISCARD_ERR, "axi read timeout"),
- LOCAL_EVENT_CAUSE(P_ATR_EVT_POST_ERR, "pcie write request error"),
- LOCAL_EVENT_CAUSE(P_ATR_EVT_FETCH_ERR, "pcie read request error"),
- LOCAL_EVENT_CAUSE(P_ATR_EVT_DISCARD_ERR, "pcie read timeout"),
- LOCAL_EVENT_CAUSE(PM_MSI_INT_AER_EVT, "aer event"),
- LOCAL_EVENT_CAUSE(PM_MSI_INT_EVENTS, "pm/ltr/hotplug event"),
- LOCAL_EVENT_CAUSE(PM_MSI_INT_SYS_ERR, "system error"),
-};
-
-static struct event_map pcie_event_to_event[] = {
- PCIE_EVENT_TO_EVENT_MAP(L2_EXIT),
- PCIE_EVENT_TO_EVENT_MAP(HOTRST_EXIT),
- PCIE_EVENT_TO_EVENT_MAP(DLUP_EXIT),
-};
-
-static struct event_map sec_error_to_event[] = {
- SEC_ERROR_TO_EVENT_MAP(TX_RAM_SEC_ERR),
- SEC_ERROR_TO_EVENT_MAP(RX_RAM_SEC_ERR),
- SEC_ERROR_TO_EVENT_MAP(PCIE2AXI_RAM_SEC_ERR),
- SEC_ERROR_TO_EVENT_MAP(AXI2PCIE_RAM_SEC_ERR),
-};
-
-static struct event_map ded_error_to_event[] = {
- DED_ERROR_TO_EVENT_MAP(TX_RAM_DED_ERR),
- DED_ERROR_TO_EVENT_MAP(RX_RAM_DED_ERR),
- DED_ERROR_TO_EVENT_MAP(PCIE2AXI_RAM_DED_ERR),
- DED_ERROR_TO_EVENT_MAP(AXI2PCIE_RAM_DED_ERR),
-};
-
-static struct event_map local_status_to_event[] = {
- LOCAL_STATUS_TO_EVENT_MAP(DMA_END_ENGINE_0),
- LOCAL_STATUS_TO_EVENT_MAP(DMA_END_ENGINE_1),
- LOCAL_STATUS_TO_EVENT_MAP(DMA_ERROR_ENGINE_0),
- LOCAL_STATUS_TO_EVENT_MAP(DMA_ERROR_ENGINE_1),
- LOCAL_STATUS_TO_EVENT_MAP(A_ATR_EVT_POST_ERR),
- LOCAL_STATUS_TO_EVENT_MAP(A_ATR_EVT_FETCH_ERR),
- LOCAL_STATUS_TO_EVENT_MAP(A_ATR_EVT_DISCARD_ERR),
- LOCAL_STATUS_TO_EVENT_MAP(A_ATR_EVT_DOORBELL),
- LOCAL_STATUS_TO_EVENT_MAP(P_ATR_EVT_POST_ERR),
- LOCAL_STATUS_TO_EVENT_MAP(P_ATR_EVT_FETCH_ERR),
- LOCAL_STATUS_TO_EVENT_MAP(P_ATR_EVT_DISCARD_ERR),
- LOCAL_STATUS_TO_EVENT_MAP(P_ATR_EVT_DOORBELL),
- LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_INTX),
- LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_MSI),
- LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_AER_EVT),
- LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_EVENTS),
- LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_SYS_ERR),
-};
-
-static struct {
- u32 base;
- u32 offset;
- u32 mask;
- u32 shift;
- u32 enb_mask;
- u32 mask_high;
- u32 mask_offset;
-} event_descs[] = {
- { PCIE_EVENT(L2_EXIT) },
- { PCIE_EVENT(HOTRST_EXIT) },
- { PCIE_EVENT(DLUP_EXIT) },
- { SEC_EVENT(TX_RAM_SEC_ERR) },
- { SEC_EVENT(RX_RAM_SEC_ERR) },
- { SEC_EVENT(PCIE2AXI_RAM_SEC_ERR) },
- { SEC_EVENT(AXI2PCIE_RAM_SEC_ERR) },
- { DED_EVENT(TX_RAM_DED_ERR) },
- { DED_EVENT(RX_RAM_DED_ERR) },
- { DED_EVENT(PCIE2AXI_RAM_DED_ERR) },
- { DED_EVENT(AXI2PCIE_RAM_DED_ERR) },
- { LOCAL_EVENT(DMA_END_ENGINE_0) },
- { LOCAL_EVENT(DMA_END_ENGINE_1) },
- { LOCAL_EVENT(DMA_ERROR_ENGINE_0) },
- { LOCAL_EVENT(DMA_ERROR_ENGINE_1) },
- { LOCAL_EVENT(A_ATR_EVT_POST_ERR) },
- { LOCAL_EVENT(A_ATR_EVT_FETCH_ERR) },
- { LOCAL_EVENT(A_ATR_EVT_DISCARD_ERR) },
- { LOCAL_EVENT(A_ATR_EVT_DOORBELL) },
- { LOCAL_EVENT(P_ATR_EVT_POST_ERR) },
- { LOCAL_EVENT(P_ATR_EVT_FETCH_ERR) },
- { LOCAL_EVENT(P_ATR_EVT_DISCARD_ERR) },
- { LOCAL_EVENT(P_ATR_EVT_DOORBELL) },
- { LOCAL_EVENT(PM_MSI_INT_INTX) },
- { LOCAL_EVENT(PM_MSI_INT_MSI) },
- { LOCAL_EVENT(PM_MSI_INT_AER_EVT) },
- { LOCAL_EVENT(PM_MSI_INT_EVENTS) },
- { LOCAL_EVENT(PM_MSI_INT_SYS_ERR) },
-};
-
-static char poss_clks[][5] = { "fic0", "fic1", "fic2", "fic3" };
-
-static void mc_pcie_enable_msi(struct mc_pcie *port, void __iomem *base)
-{
- struct mc_msi *msi = &port->msi;
- u32 cap_offset = MC_MSI_CAP_CTRL_OFFSET;
- u16 msg_ctrl = readw_relaxed(base + cap_offset + PCI_MSI_FLAGS);
-
- msg_ctrl |= PCI_MSI_FLAGS_ENABLE;
- msg_ctrl &= ~PCI_MSI_FLAGS_QMASK;
- msg_ctrl |= MC_MSI_MAX_Q_AVAIL;
- msg_ctrl &= ~PCI_MSI_FLAGS_QSIZE;
- msg_ctrl |= MC_MSI_Q_SIZE;
- msg_ctrl |= PCI_MSI_FLAGS_64BIT;
-
- writew_relaxed(msg_ctrl, base + cap_offset + PCI_MSI_FLAGS);
-
- writel_relaxed(lower_32_bits(msi->vector_phy),
- base + cap_offset + PCI_MSI_ADDRESS_LO);
- writel_relaxed(upper_32_bits(msi->vector_phy),
- base + cap_offset + PCI_MSI_ADDRESS_HI);
-}
-
-static void mc_handle_msi(struct irq_desc *desc)
-{
- struct mc_pcie *port = irq_desc_get_handler_data(desc);
- struct irq_chip *chip = irq_desc_get_chip(desc);
- struct device *dev = port->dev;
- struct mc_msi *msi = &port->msi;
- void __iomem *bridge_base_addr =
- port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
- unsigned long status;
- u32 bit;
- int ret;
-
- chained_irq_enter(chip, desc);
-
- status = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL);
- if (status & PM_MSI_INT_MSI_MASK) {
- writel_relaxed(status & PM_MSI_INT_MSI_MASK, bridge_base_addr + ISTATUS_LOCAL);
- status = readl_relaxed(bridge_base_addr + ISTATUS_MSI);
- for_each_set_bit(bit, &status, msi->num_vectors) {
- ret = generic_handle_domain_irq(msi->dev_domain, bit);
- if (ret)
- dev_err_ratelimited(dev, "bad MSI IRQ %d\n",
- bit);
- }
- }
-
- chained_irq_exit(chip, desc);
-}
-
-static void mc_msi_bottom_irq_ack(struct irq_data *data)
-{
- struct mc_pcie *port = irq_data_get_irq_chip_data(data);
- void __iomem *bridge_base_addr =
- port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
- u32 bitpos = data->hwirq;
-
- writel_relaxed(BIT(bitpos), bridge_base_addr + ISTATUS_MSI);
-}
-
-static void mc_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
-{
- struct mc_pcie *port = irq_data_get_irq_chip_data(data);
- phys_addr_t addr = port->msi.vector_phy;
-
- msg->address_lo = lower_32_bits(addr);
- msg->address_hi = upper_32_bits(addr);
- msg->data = data->hwirq;
-
- dev_dbg(port->dev, "msi#%x address_hi %#x address_lo %#x\n",
- (int)data->hwirq, msg->address_hi, msg->address_lo);
-}
-
-static int mc_msi_set_affinity(struct irq_data *irq_data,
- const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
-static struct irq_chip mc_msi_bottom_irq_chip = {
- .name = "Microchip MSI",
- .irq_ack = mc_msi_bottom_irq_ack,
- .irq_compose_msi_msg = mc_compose_msi_msg,
- .irq_set_affinity = mc_msi_set_affinity,
-};
-
-static int mc_irq_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
- unsigned int nr_irqs, void *args)
-{
- struct mc_pcie *port = domain->host_data;
- struct mc_msi *msi = &port->msi;
- void __iomem *bridge_base_addr =
- port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
- unsigned long bit;
- u32 val;
-
- mutex_lock(&msi->lock);
- bit = find_first_zero_bit(msi->used, msi->num_vectors);
- if (bit >= msi->num_vectors) {
- mutex_unlock(&msi->lock);
- return -ENOSPC;
- }
-
- set_bit(bit, msi->used);
-
- irq_domain_set_info(domain, virq, bit, &mc_msi_bottom_irq_chip,
- domain->host_data, handle_edge_irq, NULL, NULL);
-
- /* Enable MSI interrupts */
- val = readl_relaxed(bridge_base_addr + IMASK_LOCAL);
- val |= PM_MSI_INT_MSI_MASK;
- writel_relaxed(val, bridge_base_addr + IMASK_LOCAL);
-
- mutex_unlock(&msi->lock);
-
- return 0;
-}
-
-static void mc_irq_msi_domain_free(struct irq_domain *domain, unsigned int virq,
- unsigned int nr_irqs)
-{
- struct irq_data *d = irq_domain_get_irq_data(domain, virq);
- struct mc_pcie *port = irq_data_get_irq_chip_data(d);
- struct mc_msi *msi = &port->msi;
-
- mutex_lock(&msi->lock);
-
- if (test_bit(d->hwirq, msi->used))
- __clear_bit(d->hwirq, msi->used);
- else
- dev_err(port->dev, "trying to free unused MSI%lu\n", d->hwirq);
-
- mutex_unlock(&msi->lock);
-}
-
-static const struct irq_domain_ops msi_domain_ops = {
- .alloc = mc_irq_msi_domain_alloc,
- .free = mc_irq_msi_domain_free,
-};
-
-static struct irq_chip mc_msi_irq_chip = {
- .name = "Microchip PCIe MSI",
- .irq_ack = irq_chip_ack_parent,
- .irq_mask = pci_msi_mask_irq,
- .irq_unmask = pci_msi_unmask_irq,
-};
-
-static struct msi_domain_info mc_msi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_PCI_MSIX),
- .chip = &mc_msi_irq_chip,
-};
-
-static int mc_allocate_msi_domains(struct mc_pcie *port)
-{
- struct device *dev = port->dev;
- struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
- struct mc_msi *msi = &port->msi;
-
- mutex_init(&port->msi.lock);
-
- msi->dev_domain = irq_domain_add_linear(NULL, msi->num_vectors,
- &msi_domain_ops, port);
- if (!msi->dev_domain) {
- dev_err(dev, "failed to create IRQ domain\n");
- return -ENOMEM;
- }
-
- msi->msi_domain = pci_msi_create_irq_domain(fwnode, &mc_msi_domain_info,
- msi->dev_domain);
- if (!msi->msi_domain) {
- dev_err(dev, "failed to create MSI domain\n");
- irq_domain_remove(msi->dev_domain);
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static void mc_handle_intx(struct irq_desc *desc)
-{
- struct mc_pcie *port = irq_desc_get_handler_data(desc);
- struct irq_chip *chip = irq_desc_get_chip(desc);
- struct device *dev = port->dev;
- void __iomem *bridge_base_addr =
- port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
- unsigned long status;
- u32 bit;
- int ret;
-
- chained_irq_enter(chip, desc);
-
- status = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL);
- if (status & PM_MSI_INT_INTX_MASK) {
- status &= PM_MSI_INT_INTX_MASK;
- status >>= PM_MSI_INT_INTX_SHIFT;
- for_each_set_bit(bit, &status, PCI_NUM_INTX) {
- ret = generic_handle_domain_irq(port->intx_domain, bit);
- if (ret)
- dev_err_ratelimited(dev, "bad INTx IRQ %d\n",
- bit);
- }
- }
-
- chained_irq_exit(chip, desc);
-}
-
-static void mc_ack_intx_irq(struct irq_data *data)
-{
- struct mc_pcie *port = irq_data_get_irq_chip_data(data);
- void __iomem *bridge_base_addr =
- port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
- u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);
-
- writel_relaxed(mask, bridge_base_addr + ISTATUS_LOCAL);
-}
-
-static void mc_mask_intx_irq(struct irq_data *data)
-{
- struct mc_pcie *port = irq_data_get_irq_chip_data(data);
- void __iomem *bridge_base_addr =
- port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
- unsigned long flags;
- u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);
- u32 val;
-
- raw_spin_lock_irqsave(&port->lock, flags);
- val = readl_relaxed(bridge_base_addr + IMASK_LOCAL);
- val &= ~mask;
- writel_relaxed(val, bridge_base_addr + IMASK_LOCAL);
- raw_spin_unlock_irqrestore(&port->lock, flags);
-}
-
-static void mc_unmask_intx_irq(struct irq_data *data)
-{
- struct mc_pcie *port = irq_data_get_irq_chip_data(data);
- void __iomem *bridge_base_addr =
- port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
- unsigned long flags;
- u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);
- u32 val;
-
- raw_spin_lock_irqsave(&port->lock, flags);
- val = readl_relaxed(bridge_base_addr + IMASK_LOCAL);
- val |= mask;
- writel_relaxed(val, bridge_base_addr + IMASK_LOCAL);
- raw_spin_unlock_irqrestore(&port->lock, flags);
-}
-
-static struct irq_chip mc_intx_irq_chip = {
- .name = "Microchip PCIe INTx",
- .irq_ack = mc_ack_intx_irq,
- .irq_mask = mc_mask_intx_irq,
- .irq_unmask = mc_unmask_intx_irq,
-};
-
-static int mc_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
- irq_hw_number_t hwirq)
-{
- irq_set_chip_and_handler(irq, &mc_intx_irq_chip, handle_level_irq);
- irq_set_chip_data(irq, domain->host_data);
-
- return 0;
-}
-
-static const struct irq_domain_ops intx_domain_ops = {
- .map = mc_pcie_intx_map,
-};
-
-static inline u32 reg_to_event(u32 reg, struct event_map field)
-{
- return (reg & field.reg_mask) ? BIT(field.event_bit) : 0;
-}
-
-static u32 pcie_events(void __iomem *addr)
-{
- u32 reg = readl_relaxed(addr);
- u32 val = 0;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(pcie_event_to_event); i++)
- val |= reg_to_event(reg, pcie_event_to_event[i]);
-
- return val;
-}
-
-static u32 sec_errors(void __iomem *addr)
-{
- u32 reg = readl_relaxed(addr);
- u32 val = 0;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(sec_error_to_event); i++)
- val |= reg_to_event(reg, sec_error_to_event[i]);
-
- return val;
-}
-
-static u32 ded_errors(void __iomem *addr)
-{
- u32 reg = readl_relaxed(addr);
- u32 val = 0;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(ded_error_to_event); i++)
- val |= reg_to_event(reg, ded_error_to_event[i]);
-
- return val;
-}
-
-static u32 local_events(void __iomem *addr)
-{
- u32 reg = readl_relaxed(addr);
- u32 val = 0;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(local_status_to_event); i++)
- val |= reg_to_event(reg, local_status_to_event[i]);
-
- return val;
-}
-
-static u32 get_events(struct mc_pcie *port)
-{
- void __iomem *bridge_base_addr =
- port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
- void __iomem *ctrl_base_addr = port->axi_base_addr + MC_PCIE_CTRL_ADDR;
- u32 events = 0;
-
- events |= pcie_events(ctrl_base_addr + PCIE_EVENT_INT);
- events |= sec_errors(ctrl_base_addr + SEC_ERROR_INT);
- events |= ded_errors(ctrl_base_addr + DED_ERROR_INT);
- events |= local_events(bridge_base_addr + ISTATUS_LOCAL);
-
- return events;
-}
-
-static irqreturn_t mc_event_handler(int irq, void *dev_id)
-{
- struct mc_pcie *port = dev_id;
- struct device *dev = port->dev;
- struct irq_data *data;
-
- data = irq_domain_get_irq_data(port->event_domain, irq);
-
- if (event_cause[data->hwirq].str)
- dev_err_ratelimited(dev, "%s\n", event_cause[data->hwirq].str);
- else
- dev_err_ratelimited(dev, "bad event IRQ %ld\n", data->hwirq);
-
- return IRQ_HANDLED;
-}
-
-static void mc_handle_event(struct irq_desc *desc)
-{
- struct mc_pcie *port = irq_desc_get_handler_data(desc);
- unsigned long events;
- u32 bit;
- struct irq_chip *chip = irq_desc_get_chip(desc);
-
- chained_irq_enter(chip, desc);
-
- events = get_events(port);
-
- for_each_set_bit(bit, &events, NUM_EVENTS)
- generic_handle_domain_irq(port->event_domain, bit);
-
- chained_irq_exit(chip, desc);
-}
-
-static void mc_ack_event_irq(struct irq_data *data)
-{
- struct mc_pcie *port = irq_data_get_irq_chip_data(data);
- u32 event = data->hwirq;
- void __iomem *addr;
- u32 mask;
-
- addr = port->axi_base_addr + event_descs[event].base +
- event_descs[event].offset;
- mask = event_descs[event].mask;
- mask |= event_descs[event].enb_mask;
-
- writel_relaxed(mask, addr);
-}
-
-static void mc_mask_event_irq(struct irq_data *data)
-{
- struct mc_pcie *port = irq_data_get_irq_chip_data(data);
- u32 event = data->hwirq;
- void __iomem *addr;
- u32 mask;
- u32 val;
-
- addr = port->axi_base_addr + event_descs[event].base +
- event_descs[event].mask_offset;
- mask = event_descs[event].mask;
- if (event_descs[event].enb_mask) {
- mask <<= PCIE_EVENT_INT_ENB_SHIFT;
- mask &= PCIE_EVENT_INT_ENB_MASK;
- }
-
- if (!event_descs[event].mask_high)
- mask = ~mask;
-
- raw_spin_lock(&port->lock);
- val = readl_relaxed(addr);
- if (event_descs[event].mask_high)
- val |= mask;
- else
- val &= mask;
-
- writel_relaxed(val, addr);
- raw_spin_unlock(&port->lock);
-}
-
-static void mc_unmask_event_irq(struct irq_data *data)
-{
- struct mc_pcie *port = irq_data_get_irq_chip_data(data);
- u32 event = data->hwirq;
- void __iomem *addr;
- u32 mask;
- u32 val;
-
- addr = port->axi_base_addr + event_descs[event].base +
- event_descs[event].mask_offset;
- mask = event_descs[event].mask;
-
- if (event_descs[event].enb_mask)
- mask <<= PCIE_EVENT_INT_ENB_SHIFT;
-
- if (event_descs[event].mask_high)
- mask = ~mask;
-
- if (event_descs[event].enb_mask)
- mask &= PCIE_EVENT_INT_ENB_MASK;
-
- raw_spin_lock(&port->lock);
- val = readl_relaxed(addr);
- if (event_descs[event].mask_high)
- val &= mask;
- else
- val |= mask;
- writel_relaxed(val, addr);
- raw_spin_unlock(&port->lock);
-}
-
-static struct irq_chip mc_event_irq_chip = {
- .name = "Microchip PCIe EVENT",
- .irq_ack = mc_ack_event_irq,
- .irq_mask = mc_mask_event_irq,
- .irq_unmask = mc_unmask_event_irq,
-};
-
-static int mc_pcie_event_map(struct irq_domain *domain, unsigned int irq,
- irq_hw_number_t hwirq)
-{
- irq_set_chip_and_handler(irq, &mc_event_irq_chip, handle_level_irq);
- irq_set_chip_data(irq, domain->host_data);
-
- return 0;
-}
-
-static const struct irq_domain_ops event_domain_ops = {
- .map = mc_pcie_event_map,
-};
-
-static inline struct clk *mc_pcie_init_clk(struct device *dev, const char *id)
-{
- struct clk *clk;
- int ret;
-
- clk = devm_clk_get_optional(dev, id);
- if (IS_ERR(clk))
- return clk;
- if (!clk)
- return clk;
-
- ret = clk_prepare_enable(clk);
- if (ret)
- return ERR_PTR(ret);
-
- devm_add_action_or_reset(dev, (void (*) (void *))clk_disable_unprepare,
- clk);
-
- return clk;
-}
-
-static int mc_pcie_init_clks(struct device *dev)
-{
- int i;
- struct clk *fic;
-
- /*
- * PCIe may be clocked via Fabric Interface using between 1 and 4
- * clocks. Scan DT for clocks and enable them if present
- */
- for (i = 0; i < ARRAY_SIZE(poss_clks); i++) {
- fic = mc_pcie_init_clk(dev, poss_clks[i]);
- if (IS_ERR(fic))
- return PTR_ERR(fic);
- }
-
- return 0;
-}
-
-static int mc_pcie_init_irq_domains(struct mc_pcie *port)
-{
- struct device *dev = port->dev;
- struct device_node *node = dev->of_node;
- struct device_node *pcie_intc_node;
-
- /* Setup INTx */
- pcie_intc_node = of_get_next_child(node, NULL);
- if (!pcie_intc_node) {
- dev_err(dev, "failed to find PCIe Intc node\n");
- return -EINVAL;
- }
-
- port->event_domain = irq_domain_add_linear(pcie_intc_node, NUM_EVENTS,
- &event_domain_ops, port);
- if (!port->event_domain) {
- dev_err(dev, "failed to get event domain\n");
- of_node_put(pcie_intc_node);
- return -ENOMEM;
- }
-
- irq_domain_update_bus_token(port->event_domain, DOMAIN_BUS_NEXUS);
-
- port->intx_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
- &intx_domain_ops, port);
- if (!port->intx_domain) {
- dev_err(dev, "failed to get an INTx IRQ domain\n");
- of_node_put(pcie_intc_node);
- return -ENOMEM;
- }
-
- irq_domain_update_bus_token(port->intx_domain, DOMAIN_BUS_WIRED);
-
- of_node_put(pcie_intc_node);
- raw_spin_lock_init(&port->lock);
-
- return mc_allocate_msi_domains(port);
-}
-
-static void mc_pcie_setup_window(void __iomem *bridge_base_addr, u32 index,
- phys_addr_t axi_addr, phys_addr_t pci_addr,
- size_t size)
-{
- u32 atr_sz = ilog2(size) - 1;
- u32 val;
-
- if (index == 0)
- val = PCIE_CONFIG_INTERFACE;
- else
- val = PCIE_TX_RX_INTERFACE;
-
- writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
- ATR0_AXI4_SLV0_TRSL_PARAM);
-
- val = lower_32_bits(axi_addr) | (atr_sz << ATR_SIZE_SHIFT) |
- ATR_IMPL_ENABLE;
- writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
- ATR0_AXI4_SLV0_SRCADDR_PARAM);
-
- val = upper_32_bits(axi_addr);
- writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
- ATR0_AXI4_SLV0_SRC_ADDR);
-
- val = lower_32_bits(pci_addr);
- writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
- ATR0_AXI4_SLV0_TRSL_ADDR_LSB);
-
- val = upper_32_bits(pci_addr);
- writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
- ATR0_AXI4_SLV0_TRSL_ADDR_UDW);
-
- val = readl(bridge_base_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM);
- val |= (ATR0_PCIE_ATR_SIZE << ATR0_PCIE_ATR_SIZE_SHIFT);
- writel(val, bridge_base_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM);
- writel(0, bridge_base_addr + ATR0_PCIE_WIN0_SRC_ADDR);
-}
-
-static int mc_pcie_setup_windows(struct platform_device *pdev,
- struct mc_pcie *port)
-{
- void __iomem *bridge_base_addr =
- port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
- struct pci_host_bridge *bridge = platform_get_drvdata(pdev);
- struct resource_entry *entry;
- u64 pci_addr;
- u32 index = 1;
-
- resource_list_for_each_entry(entry, &bridge->windows) {
- if (resource_type(entry->res) == IORESOURCE_MEM) {
- pci_addr = entry->res->start - entry->offset;
- mc_pcie_setup_window(bridge_base_addr, index,
- entry->res->start, pci_addr,
- resource_size(entry->res));
- index++;
- }
- }
-
- return 0;
-}
-
-static int mc_platform_init(struct pci_config_window *cfg)
-{
- struct device *dev = cfg->parent;
- struct platform_device *pdev = to_platform_device(dev);
- struct mc_pcie *port;
- void __iomem *bridge_base_addr;
- void __iomem *ctrl_base_addr;
- int ret;
- int irq;
- int i, intx_irq, msi_irq, event_irq;
- u32 val;
- int err;
-
- port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
- if (!port)
- return -ENOMEM;
- port->dev = dev;
-
- ret = mc_pcie_init_clks(dev);
- if (ret) {
- dev_err(dev, "failed to get clock resources, error %d\n", ret);
- return -ENODEV;
- }
-
- port->axi_base_addr = devm_platform_ioremap_resource(pdev, 1);
- if (IS_ERR(port->axi_base_addr))
- return PTR_ERR(port->axi_base_addr);
-
- bridge_base_addr = port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
- ctrl_base_addr = port->axi_base_addr + MC_PCIE_CTRL_ADDR;
-
- port->msi.vector_phy = MSI_ADDR;
- port->msi.num_vectors = MC_NUM_MSI_IRQS;
- ret = mc_pcie_init_irq_domains(port);
- if (ret) {
- dev_err(dev, "failed creating IRQ domains\n");
- return ret;
- }
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return -ENODEV;
-
- for (i = 0; i < NUM_EVENTS; i++) {
- event_irq = irq_create_mapping(port->event_domain, i);
- if (!event_irq) {
- dev_err(dev, "failed to map hwirq %d\n", i);
- return -ENXIO;
- }
-
- err = devm_request_irq(dev, event_irq, mc_event_handler,
- 0, event_cause[i].sym, port);
- if (err) {
- dev_err(dev, "failed to request IRQ %d\n", event_irq);
- return err;
- }
- }
-
- intx_irq = irq_create_mapping(port->event_domain,
- EVENT_LOCAL_PM_MSI_INT_INTX);
- if (!intx_irq) {
- dev_err(dev, "failed to map INTx interrupt\n");
- return -ENXIO;
- }
-
- /* Plug the INTx chained handler */
- irq_set_chained_handler_and_data(intx_irq, mc_handle_intx, port);
-
- msi_irq = irq_create_mapping(port->event_domain,
- EVENT_LOCAL_PM_MSI_INT_MSI);
- if (!msi_irq)
- return -ENXIO;
-
- /* Plug the MSI chained handler */
- irq_set_chained_handler_and_data(msi_irq, mc_handle_msi, port);
-
- /* Plug the main event chained handler */
- irq_set_chained_handler_and_data(irq, mc_handle_event, port);
-
- /* Hardware doesn't setup MSI by default */
- mc_pcie_enable_msi(port, cfg->win);
-
- val = readl_relaxed(bridge_base_addr + IMASK_LOCAL);
- val |= PM_MSI_INT_INTX_MASK;
- writel_relaxed(val, bridge_base_addr + IMASK_LOCAL);
-
- writel_relaxed(val, ctrl_base_addr + ECC_CONTROL);
-
- val = PCIE_EVENT_INT_L2_EXIT_INT |
- PCIE_EVENT_INT_HOTRST_EXIT_INT |
- PCIE_EVENT_INT_DLUP_EXIT_INT;
- writel_relaxed(val, ctrl_base_addr + PCIE_EVENT_INT);
-
- val = SEC_ERROR_INT_TX_RAM_SEC_ERR_INT |
- SEC_ERROR_INT_RX_RAM_SEC_ERR_INT |
- SEC_ERROR_INT_PCIE2AXI_RAM_SEC_ERR_INT |
- SEC_ERROR_INT_AXI2PCIE_RAM_SEC_ERR_INT;
- writel_relaxed(val, ctrl_base_addr + SEC_ERROR_INT);
- writel_relaxed(0, ctrl_base_addr + SEC_ERROR_INT_MASK);
- writel_relaxed(0, ctrl_base_addr + SEC_ERROR_CNT);
-
- val = DED_ERROR_INT_TX_RAM_DED_ERR_INT |
- DED_ERROR_INT_RX_RAM_DED_ERR_INT |
- DED_ERROR_INT_PCIE2AXI_RAM_DED_ERR_INT |
- DED_ERROR_INT_AXI2PCIE_RAM_DED_ERR_INT;
- writel_relaxed(val, ctrl_base_addr + DED_ERROR_INT);
- writel_relaxed(0, ctrl_base_addr + DED_ERROR_INT_MASK);
- writel_relaxed(0, ctrl_base_addr + DED_ERROR_CNT);
-
- writel_relaxed(0, bridge_base_addr + IMASK_HOST);
- writel_relaxed(GENMASK(31, 0), bridge_base_addr + ISTATUS_HOST);
-
- /* Configure Address Translation Table 0 for PCIe config space */
- mc_pcie_setup_window(bridge_base_addr, 0, cfg->res.start & 0xffffffff,
- cfg->res.start, resource_size(&cfg->res));
-
- return mc_pcie_setup_windows(pdev, port);
-}
-
-static const struct pci_ecam_ops mc_ecam_ops = {
- .init = mc_platform_init,
- .pci_ops = {
- .map_bus = pci_ecam_map_bus,
- .read = pci_generic_config_read,
- .write = pci_generic_config_write,
- }
-};
-
-static const struct of_device_id mc_pcie_of_match[] = {
- {
- .compatible = "microchip,pcie-host-1.0",
- .data = &mc_ecam_ops,
- },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, mc_pcie_of_match);
-
-static struct platform_driver mc_pcie_driver = {
- .probe = pci_host_common_probe,
- .driver = {
- .name = "microchip-pcie",
- .of_match_table = mc_pcie_of_match,
- .suppress_bind_attrs = true,
- },
-};
-
-builtin_platform_driver(mc_pcie_driver);
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Microchip PCIe host controller driver");
-MODULE_AUTHOR("Daire McNamara <daire.mcnamara@microchip.com>");
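/*
 * Aside, not part of the patch: the removed mc_pcie_setup_window() above
 * encodes a power-of-two window size into the low SRCADDR word as
 * ilog2(size) - 1, OR'ed with the (size-aligned) low address bits and an
 * enable flag. A minimal standalone sketch of that encoding; the field
 * position and enable bit below are assumptions mirroring the macros used
 * in the removed code:
 */
#include <stdint.h>

#define ATR_SIZE_SHIFT	1	/* assumed position of the size field */
#define ATR_IMPL_ENABLE	0x1	/* assumed enable bit */

static uint32_t atr_srcaddr_lo(uint64_t axi_addr, uint64_t size)
{
	/* e.g. size = 1 MiB (0x100000): ilog2 = 20, so atr_sz = 19 */
	uint32_t atr_sz = (63 - __builtin_clzll(size)) - 1;

	/* axi_addr is assumed size-aligned, so its low bits are zero
	 * and do not collide with the size field */
	return (uint32_t)axi_addr | (atr_sz << ATR_SIZE_SHIFT) | ATR_IMPL_ENABLE;
}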
diff --git a/drivers/pci/controller/pcie-mt7621.c b/drivers/pci/controller/pcie-mt7621.c
index ee7aad09d627..01ead2f92e87 100644
--- a/drivers/pci/controller/pcie-mt7621.c
+++ b/drivers/pci/controller/pcie-mt7621.c
@@ -60,6 +60,7 @@
#define PCIE_PORT_LINKUP BIT(0)
#define PCIE_PORT_CNT 3
+#define INIT_PORTS_DELAY_MS 100
#define PERST_DELAY_MS 100
/**
@@ -201,7 +202,7 @@ static int mt7621_pcie_parse_port(struct mt7621_pcie *pcie,
struct mt7621_pcie_port *port;
struct device *dev = pcie->dev;
struct platform_device *pdev = to_platform_device(dev);
- char name[10];
+ char name[11];
int err;
port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
@@ -257,30 +258,25 @@ static int mt7621_pcie_parse_dt(struct mt7621_pcie *pcie)
{
struct device *dev = pcie->dev;
struct platform_device *pdev = to_platform_device(dev);
- struct device_node *node = dev->of_node, *child;
+ struct device_node *node = dev->of_node;
int err;
pcie->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pcie->base))
return PTR_ERR(pcie->base);
- for_each_available_child_of_node(node, child) {
+ for_each_available_child_of_node_scoped(node, child) {
int slot;
err = of_pci_get_devfn(child);
- if (err < 0) {
- of_node_put(child);
- dev_err(dev, "failed to parse devfn: %d\n", err);
- return err;
- }
+ if (err < 0)
+ return dev_err_probe(dev, err, "failed to parse devfn\n");
slot = PCI_SLOT(err);
err = mt7621_pcie_parse_port(pcie, child, slot);
- if (err) {
- of_node_put(child);
+ if (err)
return err;
- }
}
return 0;
@@ -369,6 +365,7 @@ static int mt7621_pcie_init_ports(struct mt7621_pcie *pcie)
}
}
+ msleep(INIT_PORTS_DELAY_MS);
mt7621_pcie_reset_ep_deassert(pcie);
tmp = NULL;
@@ -376,8 +373,8 @@ static int mt7621_pcie_init_ports(struct mt7621_pcie *pcie)
u32 slot = port->slot;
if (!mt7621_pcie_port_is_linkup(port)) {
- dev_err(dev, "pcie%d no card, disable it (RST & CLK)\n",
- slot);
+ dev_info(dev, "pcie%d no card, disable it (RST & CLK)\n",
+ slot);
mt7621_control_assert(port);
port->enabled = false;
num_disabled++;
@@ -522,15 +519,13 @@ remove_resets:
return err;
}
-static int mt7621_pcie_remove(struct platform_device *pdev)
+static void mt7621_pcie_remove(struct platform_device *pdev)
{
struct mt7621_pcie *pcie = platform_get_drvdata(pdev);
struct mt7621_pcie_port *port;
list_for_each_entry(port, &pcie->ports, list)
reset_control_put(port->pcie_rst);
-
- return 0;
}
static const struct of_device_id mt7621_pcie_ids[] = {
@@ -549,4 +544,5 @@ static struct platform_driver mt7621_pcie_driver = {
};
builtin_platform_driver(mt7621_pcie_driver);
+MODULE_DESCRIPTION("MediaTek MT7621 PCIe host controller driver");
MODULE_LICENSE("GPL v2");
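/*
 * Aside, not part of the patch: the mt7621 hunk above replaces the manual
 * of_node_put()-on-error pattern with the scoped child iterator, which
 * drops the reference automatically on every exit path. A minimal sketch
 * of the resulting shape, assuming a caller that only needs the devfn:
 */
#include <linux/of.h>
#include <linux/of_pci.h>

static int foo_parse_children(struct device_node *node)
{
	for_each_available_child_of_node_scoped(node, child) {
		int err = of_pci_get_devfn(child);

		/* early return is safe: the reference held on 'child'
		 * is released when it goes out of scope */
		if (err < 0)
			return err;
	}

	return 0;
}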
diff --git a/drivers/pci/controller/pcie-rcar-ep.c b/drivers/pci/controller/pcie-rcar-ep.c
index f9682df1da61..657875ef4657 100644
--- a/drivers/pci/controller/pcie-rcar-ep.c
+++ b/drivers/pci/controller/pcie-rcar-ep.c
@@ -43,7 +43,7 @@ static void rcar_pcie_ep_hw_init(struct rcar_pcie *pcie)
rcar_rmw32(pcie, REXPCAP(0), 0xff, PCI_CAP_ID_EXP);
rcar_rmw32(pcie, REXPCAP(PCI_EXP_FLAGS),
PCI_EXP_FLAGS_TYPE, PCI_EXP_TYPE_ENDPOINT << 4);
- rcar_rmw32(pcie, RCONF(PCI_HEADER_TYPE), 0x7f,
+ rcar_rmw32(pcie, RCONF(PCI_HEADER_TYPE), PCI_HEADER_TYPE_MASK,
PCI_HEADER_TYPE_NORMAL);
/* Write out the physical slot number = 0 */
@@ -107,7 +107,7 @@ static int rcar_pcie_parse_outbound_ranges(struct rcar_pcie_endpoint *ep,
}
if (!devm_request_mem_region(&pdev->dev, res->start,
resource_size(res),
- outbound_name)) {
+ res->name)) {
dev_err(pcie->dev, "Cannot request memory region %s.\n",
outbound_name);
return -EIO;
@@ -256,15 +256,15 @@ static void rcar_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn, u8 vfn,
clear_bit(atu_index + 1, ep->ib_window_map);
}
-static int rcar_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn,
- u8 interrupts)
+static int rcar_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn, u8 nr_irqs)
{
struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
struct rcar_pcie *pcie = &ep->pcie;
+ u8 mmc = order_base_2(nr_irqs);
u32 flags;
flags = rcar_pci_read_reg(pcie, MSICAP(fn));
- flags |= interrupts << MSICAP0_MMESCAP_OFFSET;
+ flags |= mmc << MSICAP0_MMESCAP_OFFSET;
rcar_pci_write_reg(pcie, flags, MSICAP(fn));
return 0;
@@ -280,7 +280,7 @@ static int rcar_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn)
if (!(flags & MSICAP0_MSIE))
return -EINVAL;
- return ((flags & MSICAP0_MMESE_MASK) >> MSICAP0_MMESE_OFFSET);
+ return 1 << ((flags & MSICAP0_MMESE_MASK) >> MSICAP0_MMESE_OFFSET);
}
static int rcar_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, u8 vfn,
@@ -402,16 +402,15 @@ static int rcar_pcie_ep_assert_msi(struct rcar_pcie *pcie,
}
static int rcar_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, u8 vfn,
- enum pci_epc_irq_type type,
- u16 interrupt_num)
+ unsigned int type, u16 interrupt_num)
{
struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
switch (type) {
- case PCI_EPC_IRQ_LEGACY:
+ case PCI_IRQ_INTX:
return rcar_pcie_ep_assert_intx(ep, fn, 0);
- case PCI_EPC_IRQ_MSI:
+ case PCI_IRQ_MSI:
return rcar_pcie_ep_assert_msi(&ep->pcie, fn, interrupt_num);
default:
@@ -437,15 +436,17 @@ static void rcar_pcie_ep_stop(struct pci_epc *epc)
}
static const struct pci_epc_features rcar_pcie_epc_features = {
- .linkup_notifier = false,
.msi_capable = true,
- .msix_capable = false,
/* use 64-bit BARs so mark BAR[1,3,5] as reserved */
- .reserved_bar = 1 << BAR_1 | 1 << BAR_3 | 1 << BAR_5,
- .bar_fixed_64bit = 1 << BAR_0 | 1 << BAR_2 | 1 << BAR_4,
- .bar_fixed_size[0] = 128,
- .bar_fixed_size[2] = 256,
- .bar_fixed_size[4] = 256,
+ .bar[BAR_0] = { .type = BAR_FIXED, .fixed_size = 128,
+ .only_64bit = true, },
+ .bar[BAR_1] = { .type = BAR_RESERVED, },
+ .bar[BAR_2] = { .type = BAR_FIXED, .fixed_size = 256,
+ .only_64bit = true, },
+ .bar[BAR_3] = { .type = BAR_RESERVED, },
+ .bar[BAR_4] = { .type = BAR_FIXED, .fixed_size = 256,
+ .only_64bit = true, },
+ .bar[BAR_5] = { .type = BAR_RESERVED, },
};
static const struct pci_epc_features*
@@ -539,6 +540,8 @@ static int rcar_pcie_ep_probe(struct platform_device *pdev)
goto err_pm_put;
}
+ pci_epc_init_notify(epc);
+
return 0;
err_pm_put:
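/*
 * Aside, not part of the patch: the rcar-ep hunks above switch the EPC
 * MSI hooks from trafficking in the raw log2 register field to a vector
 * count. The MSI Multiple Message Capable/Enable fields hold log2 of the
 * number of vectors, so the conversions reduce to:
 */
#include <linux/log2.h>
#include <linux/types.h>

static inline u8 msi_count_to_mmc(unsigned int nr_irqs)
{
	return order_base_2(nr_irqs);	/* e.g. 16 vectors -> MMC = 4 */
}

static inline unsigned int msi_mme_to_count(u8 mme)
{
	return 1U << mme;		/* e.g. MME = 3 -> 8 vectors */
}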
diff --git a/drivers/pci/controller/pcie-rcar-host.c b/drivers/pci/controller/pcie-rcar-host.c
index e4faf90feaf5..213028052aa5 100644
--- a/drivers/pci/controller/pcie-rcar-host.c
+++ b/drivers/pci/controller/pcie-rcar-host.c
@@ -12,11 +12,13 @@
*/
#include <linux/bitops.h>
+#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -29,6 +31,7 @@
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
#include "pcie-rcar.h"
@@ -36,26 +39,11 @@ struct rcar_msi {
DECLARE_BITMAP(used, INT_PCI_MSI_NR);
struct irq_domain *domain;
struct mutex map_lock;
- spinlock_t mask_lock;
+ raw_spinlock_t mask_lock;
int irq1;
int irq2;
};
-#ifdef CONFIG_ARM
-/*
- * Here we keep a static copy of the remapped PCIe controller address.
- * This is only used on aarch32 systems, all of which have one single
- * PCIe controller, to provide quick access to the PCIe controller in
- * the L1 link state fixup function, called from the ARM fault handler.
- */
-static void __iomem *pcie_base;
-/*
- * Static copy of PCIe device pointer, so we can check whether the
- * device is runtime suspended or not.
- */
-static struct device *pcie_dev;
-#endif
-
/* Structure representing the PCIe interface */
struct rcar_pcie_host {
struct rcar_pcie pcie;
@@ -65,20 +53,13 @@ struct rcar_pcie_host {
int (*phy_init_fn)(struct rcar_pcie_host *host);
};
-static DEFINE_SPINLOCK(pmsr_lock);
-
static int rcar_pcie_wakeup(struct device *pcie_dev, void __iomem *pcie_base)
{
- unsigned long flags;
u32 pmsr, val;
int ret = 0;
- spin_lock_irqsave(&pmsr_lock, flags);
-
- if (!pcie_base || pm_runtime_suspended(pcie_dev)) {
- ret = -EINVAL;
- goto unlock_exit;
- }
+ if (!pcie_base || pm_runtime_suspended(pcie_dev))
+ return -EINVAL;
pmsr = readl(pcie_base + PMSR);
@@ -92,12 +73,14 @@ static int rcar_pcie_wakeup(struct device *pcie_dev, void __iomem *pcie_base)
writel(L1IATN, pcie_base + PMCTLR);
ret = readl_poll_timeout_atomic(pcie_base + PMSR, val,
val & L1FAEG, 10, 1000);
- WARN(ret, "Timeout waiting for L1 link state, ret=%d\n", ret);
+ if (ret) {
+ dev_warn_ratelimited(pcie_dev,
+ "Timeout waiting for L1 link state, ret=%d\n",
+ ret);
+ }
writel(L1FAEG | PMEL1RX, pcie_base + PMSR);
}
-unlock_exit:
- spin_unlock_irqrestore(&pmsr_lock, flags);
return ret;
}
@@ -188,8 +171,8 @@ static int rcar_pcie_config_access(struct rcar_pcie_host *host,
* space, it's generally only accessible when in endpoint mode.
* When in root complex mode, the controller is unable to target
* itself with either type 0 or type 1 accesses, and indeed, any
- * controller initiated target transfer to its own config space
- * result in a completer abort.
+ * controller-initiated target transfer to its own config space
+ * results in a completer abort.
*
* Each channel effectively only supports a single device, but as
* the same channel <-> device access works for any PCI_SLOT()
@@ -219,9 +202,9 @@ static int rcar_pcie_config_access(struct rcar_pcie_host *host,
/* Enable the configuration access */
if (pci_is_root_bus(bus->parent))
- rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE0, PCIECCTLR);
+ rcar_pci_write_reg(pcie, PCIECCTLR_CCIE | TYPE0, PCIECCTLR);
else
- rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE1, PCIECCTLR);
+ rcar_pci_write_reg(pcie, PCIECCTLR_CCIE | TYPE1, PCIECCTLR);
/* Check for errors */
if (rcar_pci_read_reg(pcie, PCIEERRFR) & UNSUPPORTED_REQUEST)
@@ -475,7 +458,7 @@ static int rcar_pcie_hw_init(struct rcar_pcie *pcie)
rcar_rmw32(pcie, REXPCAP(0), 0xff, PCI_CAP_ID_EXP);
rcar_rmw32(pcie, REXPCAP(PCI_EXP_FLAGS),
PCI_EXP_FLAGS_TYPE, PCI_EXP_TYPE_ROOT_PORT << 4);
- rcar_rmw32(pcie, RCONF(PCI_HEADER_TYPE), 0x7f,
+ rcar_rmw32(pcie, RCONF(PCI_HEADER_TYPE), PCI_HEADER_TYPE_MASK,
PCI_HEADER_TYPE_BRIDGE);
/* Enable data link layer active state reporting */
@@ -593,7 +576,7 @@ static irqreturn_t rcar_pcie_msi_irq(int irq, void *data)
unsigned int index = find_first_bit(&reg, 32);
int ret;
- ret = generic_handle_domain_irq(msi->domain->parent, index);
+ ret = generic_handle_domain_irq(msi->domain, index);
if (ret) {
/* Unknown MSI, just clear it */
dev_dbg(dev, "unexpected MSI\n");
@@ -607,30 +590,6 @@ static irqreturn_t rcar_pcie_msi_irq(int irq, void *data)
return IRQ_HANDLED;
}
-static void rcar_msi_top_irq_ack(struct irq_data *d)
-{
- irq_chip_ack_parent(d);
-}
-
-static void rcar_msi_top_irq_mask(struct irq_data *d)
-{
- pci_msi_mask_irq(d);
- irq_chip_mask_parent(d);
-}
-
-static void rcar_msi_top_irq_unmask(struct irq_data *d)
-{
- pci_msi_unmask_irq(d);
- irq_chip_unmask_parent(d);
-}
-
-static struct irq_chip rcar_msi_top_chip = {
- .name = "PCIe MSI",
- .irq_ack = rcar_msi_top_irq_ack,
- .irq_mask = rcar_msi_top_irq_mask,
- .irq_unmask = rcar_msi_top_irq_unmask,
-};
-
static void rcar_msi_irq_ack(struct irq_data *d)
{
struct rcar_msi *msi = irq_data_get_irq_chip_data(d);
@@ -644,33 +603,26 @@ static void rcar_msi_irq_mask(struct irq_data *d)
{
struct rcar_msi *msi = irq_data_get_irq_chip_data(d);
struct rcar_pcie *pcie = &msi_to_host(msi)->pcie;
- unsigned long flags;
u32 value;
- spin_lock_irqsave(&msi->mask_lock, flags);
- value = rcar_pci_read_reg(pcie, PCIEMSIIER);
- value &= ~BIT(d->hwirq);
- rcar_pci_write_reg(pcie, value, PCIEMSIIER);
- spin_unlock_irqrestore(&msi->mask_lock, flags);
+ scoped_guard(raw_spinlock_irqsave, &msi->mask_lock) {
+ value = rcar_pci_read_reg(pcie, PCIEMSIIER);
+ value &= ~BIT(d->hwirq);
+ rcar_pci_write_reg(pcie, value, PCIEMSIIER);
+ }
}
static void rcar_msi_irq_unmask(struct irq_data *d)
{
struct rcar_msi *msi = irq_data_get_irq_chip_data(d);
struct rcar_pcie *pcie = &msi_to_host(msi)->pcie;
- unsigned long flags;
u32 value;
- spin_lock_irqsave(&msi->mask_lock, flags);
- value = rcar_pci_read_reg(pcie, PCIEMSIIER);
- value |= BIT(d->hwirq);
- rcar_pci_write_reg(pcie, value, PCIEMSIIER);
- spin_unlock_irqrestore(&msi->mask_lock, flags);
-}
-
-static int rcar_msi_set_affinity(struct irq_data *d, const struct cpumask *mask, bool force)
-{
- return -EINVAL;
+ scoped_guard(raw_spinlock_irqsave, &msi->mask_lock) {
+ value = rcar_pci_read_reg(pcie, PCIEMSIIER);
+ value |= BIT(d->hwirq);
+ rcar_pci_write_reg(pcie, value, PCIEMSIIER);
+ }
}
static void rcar_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
@@ -684,11 +636,10 @@ static void rcar_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
}
static struct irq_chip rcar_msi_bottom_chip = {
- .name = "Rcar MSI",
+ .name = "R-Car MSI",
.irq_ack = rcar_msi_irq_ack,
.irq_mask = rcar_msi_irq_mask,
.irq_unmask = rcar_msi_irq_unmask,
- .irq_set_affinity = rcar_msi_set_affinity,
.irq_compose_msi_msg = rcar_compose_msi_msg,
};
@@ -734,30 +685,36 @@ static const struct irq_domain_ops rcar_msi_domain_ops = {
.free = rcar_msi_domain_free,
};
-static struct msi_domain_info rcar_msi_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_MULTI_PCI_MSI),
- .chip = &rcar_msi_top_chip,
+#define RCAR_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_PCI_MSI_MASK_PARENT | \
+ MSI_FLAG_NO_AFFINITY)
+
+#define RCAR_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
+ MSI_FLAG_MULTI_PCI_MSI)
+
+static const struct msi_parent_ops rcar_msi_parent_ops = {
+ .required_flags = RCAR_MSI_FLAGS_REQUIRED,
+ .supported_flags = RCAR_MSI_FLAGS_SUPPORTED,
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .chip_flags = MSI_CHIP_FLAG_SET_ACK,
+ .prefix = "RCAR-",
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
};
static int rcar_allocate_domains(struct rcar_msi *msi)
{
struct rcar_pcie *pcie = &msi_to_host(msi)->pcie;
- struct fwnode_handle *fwnode = dev_fwnode(pcie->dev);
- struct irq_domain *parent;
-
- parent = irq_domain_create_linear(fwnode, INT_PCI_MSI_NR,
- &rcar_msi_domain_ops, msi);
- if (!parent) {
- dev_err(pcie->dev, "failed to create IRQ domain\n");
- return -ENOMEM;
- }
- irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS);
-
- msi->domain = pci_msi_create_irq_domain(fwnode, &rcar_msi_info, parent);
+ struct irq_domain_info info = {
+ .fwnode = dev_fwnode(pcie->dev),
+ .ops = &rcar_msi_domain_ops,
+ .host_data = msi,
+ .size = INT_PCI_MSI_NR,
+ };
+
+ msi->domain = msi_create_parent_irq_domain(&info, &rcar_msi_parent_ops);
if (!msi->domain) {
- dev_err(pcie->dev, "failed to create MSI domain\n");
- irq_domain_remove(parent);
+ dev_err(pcie->dev, "failed to create IRQ domain\n");
return -ENOMEM;
}
@@ -766,10 +723,7 @@ static int rcar_allocate_domains(struct rcar_msi *msi)
static void rcar_free_domains(struct rcar_msi *msi)
{
- struct irq_domain *parent = msi->domain->parent;
-
irq_domain_remove(msi->domain);
- irq_domain_remove(parent);
}
static int rcar_pcie_enable_msi(struct rcar_pcie_host *host)
@@ -781,7 +735,7 @@ static int rcar_pcie_enable_msi(struct rcar_pcie_host *host)
int err;
mutex_init(&msi->map_lock);
- spin_lock_init(&msi->mask_lock);
+ raw_spin_lock_init(&msi->mask_lock);
err = of_address_to_resource(dev->of_node, 0, &res);
if (err)
@@ -791,7 +745,7 @@ static int rcar_pcie_enable_msi(struct rcar_pcie_host *host)
if (err)
return err;
- /* Two irqs are for MSI, but they are also used for non-MSI irqs */
+ /* Two IRQs are for MSI, but they are also used for non-MSI IRQs */
err = devm_request_irq(dev, msi->irq1, rcar_pcie_msi_irq,
IRQF_SHARED | IRQF_NO_THREAD,
rcar_msi_bottom_chip.name, host);
@@ -808,12 +762,12 @@ static int rcar_pcie_enable_msi(struct rcar_pcie_host *host)
goto err;
}
- /* disable all MSIs */
+ /* Disable all MSIs */
rcar_pci_write_reg(pcie, 0, PCIEMSIIER);
/*
- * Setup MSI data target using RC base address address, which
- * is guaranteed to be in the low 32bit range on any RCar HW.
+ * Setup MSI data target using RC base address, which is guaranteed
+ * to be in the low 32bit range on any R-Car HW.
*/
rcar_pci_write_reg(pcie, lower_32_bits(res.start) | MSIFE, PCIEMSIALR);
rcar_pci_write_reg(pcie, upper_32_bits(res.start), PCIEMSIAUR);
@@ -879,12 +833,6 @@ static int rcar_pcie_get_resources(struct rcar_pcie_host *host)
}
host->msi.irq2 = i;
-#ifdef CONFIG_ARM
- /* Cache static copy for L1 link state fixup hook on aarch32 */
- pcie_base = pcie->base;
- pcie_dev = pcie->dev;
-#endif
-
return 0;
err_irq2:
@@ -914,6 +862,7 @@ static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie,
dev_err(pcie->dev, "Failed to map inbound regions!\n");
return -EINVAL;
}
+
/*
* If the size of the range is larger than the alignment of
* the start address, we have to use multiple entries to
@@ -925,6 +874,7 @@ static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie,
size = min(size, alignment);
}
+
/* Hardware supports max 4GiB inbound region */
size = min(size, 1ULL << 32);
@@ -974,14 +924,22 @@ static const struct of_device_id rcar_pcie_of_match[] = {
{},
};
+/* Design note 346 from Linear Technology says order is not important. */
+static const char * const rcar_pcie_supplies[] = {
+ "vpcie1v5",
+ "vpcie3v3",
+ "vpcie12v",
+};
+
static int rcar_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct pci_host_bridge *bridge;
struct rcar_pcie_host *host;
struct rcar_pcie *pcie;
+ unsigned int i;
u32 data;
int err;
- struct pci_host_bridge *bridge;
bridge = devm_pci_alloc_host_bridge(dev, sizeof(*host));
if (!bridge)
@@ -992,6 +950,13 @@ static int rcar_pcie_probe(struct platform_device *pdev)
pcie->dev = dev;
platform_set_drvdata(pdev, host);
+ for (i = 0; i < ARRAY_SIZE(rcar_pcie_supplies); i++) {
+ err = devm_regulator_get_enable_optional(dev, rcar_pcie_supplies[i]);
+ if (err < 0 && err != -ENODEV)
+ return dev_err_probe(dev, err, "failed to enable regulator: %s\n",
+ rcar_pcie_supplies[i]);
+ }
+
pm_runtime_enable(pcie->dev);
err = pm_runtime_get_sync(pcie->dev);
if (err < 0) {
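/*
 * Aside, not part of the patch: the mask/unmask hunks above use
 * scoped_guard() from <linux/cleanup.h>, which takes the lock on entry
 * to the statement block and releases it on any exit path. A minimal
 * sketch with a hypothetical register cache protected by a raw spinlock:
 */
#include <linux/bits.h>
#include <linux/cleanup.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(demo_lock);
static u32 demo_reg;

static void demo_set_bit(unsigned int bit)
{
	scoped_guard(raw_spinlock_irqsave, &demo_lock)
		demo_reg |= BIT(bit);	/* lock dropped at end of scope */
}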
diff --git a/drivers/pci/controller/pcie-rcar.h b/drivers/pci/controller/pcie-rcar.h
index 9bb125db85c6..6799f81392fc 100644
--- a/drivers/pci/controller/pcie-rcar.h
+++ b/drivers/pci/controller/pcie-rcar.h
@@ -11,7 +11,7 @@
#define PCIECAR 0x000010
#define PCIECCTLR 0x000018
-#define CONFIG_SEND_ENABLE BIT(31)
+#define PCIECCTLR_CCIE BIT(31)
#define TYPE0 (0 << 8)
#define TYPE1 BIT(8)
#define PCIECDR 0x000020
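/*
 * Aside, not part of the patch: the rename above exists because the
 * CONFIG_ prefix is conventionally reserved for Kconfig symbols, so
 * CONFIG_SEND_ENABLE reads like a build-time option rather than a
 * register bit. The new name follows the usual <REGISTER>_<FIELD>
 * convention (CCIE appears to be the configuration-access enable bit
 * of PCIECCTLR).
 */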
diff --git a/drivers/pci/controller/pcie-rockchip-ep.c b/drivers/pci/controller/pcie-rockchip-ep.c
index d1a200b93b2b..799461335762 100644
--- a/drivers/pci/controller/pcie-rockchip-ep.c
+++ b/drivers/pci/controller/pcie-rockchip-ep.c
@@ -10,12 +10,16 @@
#include <linux/configfs.h>
#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/iopoll.h>
#include <linux/kernel.h>
+#include <linux/irq.h>
#include <linux/of.h>
#include <linux/pci-epc.h>
#include <linux/platform_device.h>
#include <linux/pci-epf.h>
#include <linux/sizes.h>
+#include <linux/workqueue.h>
#include "pcie-rockchip.h"
@@ -26,16 +30,20 @@
* @max_regions: maximum number of regions supported by hardware
* @ob_region_map: bitmask of mapped outbound regions
* @ob_addr: base addresses in the AXI bus where the outbound regions start
- * @irq_phys_addr: base address on the AXI bus where the MSI/legacy IRQ
+ * @irq_phys_addr: base address on the AXI bus where the MSI/INTX IRQ
* dedicated outbound regions is mapped.
* @irq_cpu_addr: base address in the CPU space where a write access triggers
- * the sending of a memory write (MSI) / normal message (legacy
+ * the sending of a memory write (MSI) / normal message (INTX
* IRQ) TLP through the PCIe bus.
- * @irq_pci_addr: used to save the current mapping of the MSI/legacy IRQ
+ * @irq_pci_addr: used to save the current mapping of the MSI/INTX IRQ
* dedicated outbound region.
* @irq_pci_fn: the latest PCI function that has updated the mapping of
- * the MSI/legacy IRQ dedicated outbound region.
- * @irq_pending: bitmask of asserted legacy IRQs.
+ * the MSI/INTX IRQ dedicated outbound region.
+ * @irq_pending: bitmask of asserted INTX IRQs.
+ * @perst_irq: IRQ used for the PERST# signal.
+ * @perst_asserted: True if the PERST# signal was asserted.
+ * @link_up: True if the PCI link is up.
+ * @link_training: Work item to execute PCI link training.
*/
struct rockchip_pcie_ep {
struct rockchip_pcie rockchip;
@@ -48,6 +56,10 @@ struct rockchip_pcie_ep {
u64 irq_pci_addr;
u8 irq_pci_fn;
u8 irq_pending;
+ int perst_irq;
+ bool perst_asserted;
+ bool link_up;
+ struct delayed_work link_training;
};
static void rockchip_pcie_clear_ep_ob_atu(struct rockchip_pcie *rockchip,
@@ -61,84 +73,61 @@ static void rockchip_pcie_clear_ep_ob_atu(struct rockchip_pcie *rockchip,
ROCKCHIP_PCIE_AT_OB_REGION_DESC0(region));
rockchip_pcie_write(rockchip, 0,
ROCKCHIP_PCIE_AT_OB_REGION_DESC1(region));
- rockchip_pcie_write(rockchip, 0,
- ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR0(region));
- rockchip_pcie_write(rockchip, 0,
- ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR1(region));
+}
+
+static int rockchip_pcie_ep_ob_atu_num_bits(struct rockchip_pcie *rockchip,
+ u64 pci_addr, size_t size)
+{
+ int num_pass_bits = fls64(pci_addr ^ (pci_addr + size - 1));
+
+ return clamp(num_pass_bits,
+ ROCKCHIP_PCIE_AT_MIN_NUM_BITS,
+ ROCKCHIP_PCIE_AT_MAX_NUM_BITS);
}
static void rockchip_pcie_prog_ep_ob_atu(struct rockchip_pcie *rockchip, u8 fn,
- u32 r, u32 type, u64 cpu_addr,
- u64 pci_addr, size_t size)
+ u32 r, u64 cpu_addr, u64 pci_addr,
+ size_t size)
{
- u64 sz = 1ULL << fls64(size - 1);
- int num_pass_bits = ilog2(sz);
- u32 addr0, addr1, desc0, desc1;
- bool is_nor_msg = (type == AXI_WRAPPER_NOR_MSG);
-
- /* The minimal region size is 1MB */
- if (num_pass_bits < 8)
- num_pass_bits = 8;
-
- cpu_addr -= rockchip->mem_res->start;
- addr0 = ((is_nor_msg ? 0x10 : (num_pass_bits - 1)) &
- PCIE_CORE_OB_REGION_ADDR0_NUM_BITS) |
- (lower_32_bits(cpu_addr) & PCIE_CORE_OB_REGION_ADDR0_LO_ADDR);
- addr1 = upper_32_bits(is_nor_msg ? cpu_addr : pci_addr);
- desc0 = ROCKCHIP_PCIE_AT_OB_REGION_DESC0_DEVFN(fn) | type;
- desc1 = 0;
-
- if (is_nor_msg) {
- rockchip_pcie_write(rockchip, 0,
- ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0(r));
- rockchip_pcie_write(rockchip, 0,
- ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR1(r));
- rockchip_pcie_write(rockchip, desc0,
- ROCKCHIP_PCIE_AT_OB_REGION_DESC0(r));
- rockchip_pcie_write(rockchip, desc1,
- ROCKCHIP_PCIE_AT_OB_REGION_DESC1(r));
- } else {
- /* PCI bus address region */
- rockchip_pcie_write(rockchip, addr0,
- ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0(r));
- rockchip_pcie_write(rockchip, addr1,
- ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR1(r));
- rockchip_pcie_write(rockchip, desc0,
- ROCKCHIP_PCIE_AT_OB_REGION_DESC0(r));
- rockchip_pcie_write(rockchip, desc1,
- ROCKCHIP_PCIE_AT_OB_REGION_DESC1(r));
-
- addr0 =
- ((num_pass_bits - 1) & PCIE_CORE_OB_REGION_ADDR0_NUM_BITS) |
- (lower_32_bits(cpu_addr) &
- PCIE_CORE_OB_REGION_ADDR0_LO_ADDR);
- addr1 = upper_32_bits(cpu_addr);
- }
+ int num_pass_bits;
+ u32 addr0, addr1, desc0;
+
+ num_pass_bits = rockchip_pcie_ep_ob_atu_num_bits(rockchip,
+ pci_addr, size);
+
+ addr0 = ((num_pass_bits - 1) & PCIE_CORE_OB_REGION_ADDR0_NUM_BITS) |
+ (lower_32_bits(pci_addr) & PCIE_CORE_OB_REGION_ADDR0_LO_ADDR);
+ addr1 = upper_32_bits(pci_addr);
+ desc0 = ROCKCHIP_PCIE_AT_OB_REGION_DESC0_DEVFN(fn) | AXI_WRAPPER_MEM_WRITE;
- /* CPU bus address region */
+ /* PCI bus address region */
rockchip_pcie_write(rockchip, addr0,
- ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR0(r));
+ ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0(r));
rockchip_pcie_write(rockchip, addr1,
- ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR1(r));
+ ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR1(r));
+ rockchip_pcie_write(rockchip, desc0,
+ ROCKCHIP_PCIE_AT_OB_REGION_DESC0(r));
+ rockchip_pcie_write(rockchip, 0,
+ ROCKCHIP_PCIE_AT_OB_REGION_DESC1(r));
}
static int rockchip_pcie_ep_write_header(struct pci_epc *epc, u8 fn, u8 vfn,
struct pci_epf_header *hdr)
{
+ u32 reg;
struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
struct rockchip_pcie *rockchip = &ep->rockchip;
/* All functions share the same vendor ID with function 0 */
if (fn == 0) {
- u32 vid_regs = (hdr->vendorid & GENMASK(15, 0)) |
- (hdr->subsys_vendor_id & GENMASK(31, 16)) << 16;
-
- rockchip_pcie_write(rockchip, vid_regs,
+ rockchip_pcie_write(rockchip,
+ hdr->vendorid | hdr->subsys_vendor_id << 16,
PCIE_CORE_CONFIG_VENDOR);
}
- rockchip_pcie_write(rockchip, hdr->deviceid << 16,
- ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + PCI_VENDOR_ID);
+ reg = rockchip_pcie_read(rockchip, PCIE_EP_CONFIG_DID_VID);
+ reg = (reg & 0xFFFF) | (hdr->deviceid << 16);
+ rockchip_pcie_write(rockchip, reg, PCIE_EP_CONFIG_DID_VID);
rockchip_pcie_write(rockchip,
hdr->revid |
@@ -184,7 +173,7 @@ static int rockchip_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, u8 vfn,
ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_IO_32BITS;
} else {
bool is_prefetch = !!(flags & PCI_BASE_ADDRESS_MEM_PREFETCH);
- bool is_64bits = sz > SZ_2G;
+ bool is_64bits = !!(flags & PCI_BASE_ADDRESS_MEM_TYPE_64);
if (is_64bits && (bar & 1))
return -EINVAL;
@@ -256,26 +245,45 @@ static void rockchip_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn, u8 vfn,
ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar));
}
+static inline u32 rockchip_ob_region(phys_addr_t addr)
+{
+ return (addr >> ilog2(SZ_1M)) & 0x1f;
+}
+
+static u64 rockchip_pcie_ep_align_addr(struct pci_epc *epc, u64 pci_addr,
+ size_t *pci_size, size_t *addr_offset)
+{
+ struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
+ size_t size = *pci_size;
+ u64 offset, mask;
+ int num_bits;
+
+ num_bits = rockchip_pcie_ep_ob_atu_num_bits(&ep->rockchip,
+ pci_addr, size);
+ mask = (1ULL << num_bits) - 1;
+
+ offset = pci_addr & mask;
+ if (size + offset > SZ_1M)
+ size = SZ_1M - offset;
+
+ *pci_size = ALIGN(offset + size, ROCKCHIP_PCIE_AT_SIZE_ALIGN);
+ *addr_offset = offset;
+
+ return pci_addr & ~mask;
+}
+
static int rockchip_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, u8 vfn,
phys_addr_t addr, u64 pci_addr,
size_t size)
{
struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
struct rockchip_pcie *pcie = &ep->rockchip;
- u32 r;
+ u32 r = rockchip_ob_region(addr);
- r = find_first_zero_bit(&ep->ob_region_map, BITS_PER_LONG);
- /*
- * Region 0 is reserved for configuration space and shouldn't
- * be used elsewhere per TRM, so leave it out.
- */
- if (r >= ep->max_regions - 1) {
- dev_err(&epc->dev, "no free outbound region\n");
- return -EINVAL;
- }
+ if (test_bit(r, &ep->ob_region_map))
+ return -EBUSY;
- rockchip_pcie_prog_ep_ob_atu(pcie, fn, r, AXI_WRAPPER_MEM_WRITE, addr,
- pci_addr, size);
+ rockchip_pcie_prog_ep_ob_atu(pcie, fn, r, addr, pci_addr, size);
set_bit(r, &ep->ob_region_map);
ep->ob_addr[r] = addr;
@@ -288,17 +296,9 @@ static void rockchip_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, u8 vfn,
{
struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
struct rockchip_pcie *rockchip = &ep->rockchip;
- u32 r;
+ u32 r = rockchip_ob_region(addr);
- for (r = 0; r < ep->max_regions - 1; r++)
- if (ep->ob_addr[r] == addr)
- break;
-
- /*
- * Region 0 is reserved for configuration space and shouldn't
- * be used elsewhere per TRM, so leave it out.
- */
- if (r == ep->max_regions - 1)
+ if (addr != ep->ob_addr[r] || !test_bit(r, &ep->ob_region_map))
return;
rockchip_pcie_clear_ep_ob_atu(rockchip, r);
@@ -308,19 +308,20 @@ static void rockchip_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, u8 vfn,
}
static int rockchip_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn,
- u8 multi_msg_cap)
+ u8 nr_irqs)
{
struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
struct rockchip_pcie *rockchip = &ep->rockchip;
- u16 flags;
+ u8 mmc = order_base_2(nr_irqs);
+ u32 flags;
flags = rockchip_pcie_read(rockchip,
ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
flags &= ~ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_MASK;
flags |=
- ((multi_msg_cap << 1) << ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_OFFSET) |
- PCI_MSI_FLAGS_64BIT;
+ (mmc << ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_OFFSET) |
+ (PCI_MSI_FLAGS_64BIT << ROCKCHIP_PCIE_EP_MSI_FLAGS_OFFSET);
flags &= ~ROCKCHIP_PCIE_EP_MSI_CTRL_MASK_MSI_CAP;
rockchip_pcie_write(rockchip, flags,
ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
@@ -332,7 +333,7 @@ static int rockchip_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn)
{
struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
struct rockchip_pcie *rockchip = &ep->rockchip;
- u16 flags;
+ u32 flags;
flags = rockchip_pcie_read(rockchip,
ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
@@ -340,57 +341,34 @@ static int rockchip_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn)
if (!(flags & ROCKCHIP_PCIE_EP_MSI_CTRL_ME))
return -EINVAL;
- return ((flags & ROCKCHIP_PCIE_EP_MSI_CTRL_MME_MASK) >>
- ROCKCHIP_PCIE_EP_MSI_CTRL_MME_OFFSET);
+ return 1 << ((flags & ROCKCHIP_PCIE_EP_MSI_CTRL_MME_MASK) >>
+ ROCKCHIP_PCIE_EP_MSI_CTRL_MME_OFFSET);
}
static void rockchip_pcie_ep_assert_intx(struct rockchip_pcie_ep *ep, u8 fn,
- u8 intx, bool is_asserted)
+ u8 intx, bool do_assert)
{
struct rockchip_pcie *rockchip = &ep->rockchip;
- u32 r = ep->max_regions - 1;
- u32 offset;
- u32 status;
- u8 msg_code;
-
- if (unlikely(ep->irq_pci_addr != ROCKCHIP_PCIE_EP_PCI_LEGACY_IRQ_ADDR ||
- ep->irq_pci_fn != fn)) {
- rockchip_pcie_prog_ep_ob_atu(rockchip, fn, r,
- AXI_WRAPPER_NOR_MSG,
- ep->irq_phys_addr, 0, 0);
- ep->irq_pci_addr = ROCKCHIP_PCIE_EP_PCI_LEGACY_IRQ_ADDR;
- ep->irq_pci_fn = fn;
- }
intx &= 3;
- if (is_asserted) {
+
+ if (do_assert) {
ep->irq_pending |= BIT(intx);
- msg_code = ROCKCHIP_PCIE_MSG_CODE_ASSERT_INTA + intx;
+ rockchip_pcie_write(rockchip,
+ PCIE_CLIENT_INT_IN_ASSERT |
+ PCIE_CLIENT_INT_PEND_ST_PEND,
+ PCIE_CLIENT_LEGACY_INT_CTRL);
} else {
ep->irq_pending &= ~BIT(intx);
- msg_code = ROCKCHIP_PCIE_MSG_CODE_DEASSERT_INTA + intx;
+ rockchip_pcie_write(rockchip,
+ PCIE_CLIENT_INT_IN_DEASSERT |
+ PCIE_CLIENT_INT_PEND_ST_NORMAL,
+ PCIE_CLIENT_LEGACY_INT_CTRL);
}
-
- status = rockchip_pcie_read(rockchip,
- ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
- ROCKCHIP_PCIE_EP_CMD_STATUS);
- status &= ROCKCHIP_PCIE_EP_CMD_STATUS_IS;
-
- if ((status != 0) ^ (ep->irq_pending != 0)) {
- status ^= ROCKCHIP_PCIE_EP_CMD_STATUS_IS;
- rockchip_pcie_write(rockchip, status,
- ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
- ROCKCHIP_PCIE_EP_CMD_STATUS);
- }
-
- offset =
- ROCKCHIP_PCIE_MSG_ROUTING(ROCKCHIP_PCIE_MSG_ROUTING_LOCAL_INTX) |
- ROCKCHIP_PCIE_MSG_CODE(msg_code) | ROCKCHIP_PCIE_MSG_NO_DATA;
- writel(0, ep->irq_cpu_addr + offset);
}
-static int rockchip_pcie_ep_send_legacy_irq(struct rockchip_pcie_ep *ep, u8 fn,
- u8 intx)
+static int rockchip_pcie_ep_send_intx_irq(struct rockchip_pcie_ep *ep, u8 fn,
+ u8 intx)
{
u16 cmd;
@@ -416,9 +394,11 @@ static int rockchip_pcie_ep_send_msi_irq(struct rockchip_pcie_ep *ep, u8 fn,
u8 interrupt_num)
{
struct rockchip_pcie *rockchip = &ep->rockchip;
- u16 flags, mme, data, data_mask;
+ u32 flags, mme, data, data_mask;
+ size_t irq_pci_size, offset;
+ u64 irq_pci_addr;
u8 msi_count;
- u64 pci_addr, pci_addr_mask = 0xff;
+ u64 pci_addr;
/* Check MSI enable bit */
flags = rockchip_pcie_read(&ep->rockchip,
@@ -452,34 +432,35 @@ static int rockchip_pcie_ep_send_msi_irq(struct rockchip_pcie_ep *ep, u8 fn,
ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
ROCKCHIP_PCIE_EP_MSI_CTRL_REG +
PCI_MSI_ADDRESS_LO);
- pci_addr &= GENMASK_ULL(63, 2);
/* Set the outbound region if needed. */
- if (unlikely(ep->irq_pci_addr != (pci_addr & ~pci_addr_mask) ||
+ irq_pci_size = ~PCIE_ADDR_MASK + 1;
+ irq_pci_addr = rockchip_pcie_ep_align_addr(ep->epc,
+ pci_addr & PCIE_ADDR_MASK,
+ &irq_pci_size, &offset);
+ if (unlikely(ep->irq_pci_addr != irq_pci_addr ||
ep->irq_pci_fn != fn)) {
- rockchip_pcie_prog_ep_ob_atu(rockchip, fn, ep->max_regions - 1,
- AXI_WRAPPER_MEM_WRITE,
- ep->irq_phys_addr,
- pci_addr & ~pci_addr_mask,
- pci_addr_mask + 1);
- ep->irq_pci_addr = (pci_addr & ~pci_addr_mask);
+ rockchip_pcie_prog_ep_ob_atu(rockchip, fn,
+ rockchip_ob_region(ep->irq_phys_addr),
+ ep->irq_phys_addr,
+ irq_pci_addr, irq_pci_size);
+ ep->irq_pci_addr = irq_pci_addr;
ep->irq_pci_fn = fn;
}
- writew(data, ep->irq_cpu_addr + (pci_addr & pci_addr_mask));
+ writew(data, ep->irq_cpu_addr + offset + (pci_addr & ~PCIE_ADDR_MASK));
return 0;
}
static int rockchip_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, u8 vfn,
- enum pci_epc_irq_type type,
- u16 interrupt_num)
+ unsigned int type, u16 interrupt_num)
{
struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
switch (type) {
- case PCI_EPC_IRQ_LEGACY:
- return rockchip_pcie_ep_send_legacy_irq(ep, fn, 0);
- case PCI_EPC_IRQ_MSI:
+ case PCI_IRQ_INTX:
+ return rockchip_pcie_ep_send_intx_irq(ep, fn, 0);
+ case PCI_IRQ_MSI:
return rockchip_pcie_ep_send_msi_irq(ep, fn, interrupt_num);
default:
return -EINVAL;
@@ -499,13 +480,222 @@ static int rockchip_pcie_ep_start(struct pci_epc *epc)
rockchip_pcie_write(rockchip, cfg, PCIE_CORE_PHY_FUNC_CFG);
+ if (rockchip->perst_gpio)
+ enable_irq(ep->perst_irq);
+
+ /* Enable configuration and start link training */
+ rockchip_pcie_write(rockchip,
+ PCIE_CLIENT_LINK_TRAIN_ENABLE |
+ PCIE_CLIENT_CONF_ENABLE,
+ PCIE_CLIENT_CONFIG);
+
+ if (!rockchip->perst_gpio)
+ schedule_delayed_work(&ep->link_training, 0);
+
+ return 0;
+}
+
+static void rockchip_pcie_ep_stop(struct pci_epc *epc)
+{
+ struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
+ struct rockchip_pcie *rockchip = &ep->rockchip;
+
+ if (rockchip->perst_gpio) {
+ ep->perst_asserted = true;
+ disable_irq(ep->perst_irq);
+ }
+
+ cancel_delayed_work_sync(&ep->link_training);
+
+ /* Stop link training and disable configuration */
+ rockchip_pcie_write(rockchip,
+ PCIE_CLIENT_CONF_DISABLE |
+ PCIE_CLIENT_LINK_TRAIN_DISABLE,
+ PCIE_CLIENT_CONFIG);
+}
+
+static void rockchip_pcie_ep_retrain_link(struct rockchip_pcie *rockchip)
+{
+ u32 status;
+
+ status = rockchip_pcie_read(rockchip, PCIE_EP_CONFIG_BASE + PCI_EXP_LNKCTL);
+ status |= PCI_EXP_LNKCTL_RL;
+ rockchip_pcie_write(rockchip, status, PCIE_EP_CONFIG_BASE + PCI_EXP_LNKCTL);
+}
+
+static bool rockchip_pcie_ep_link_up(struct rockchip_pcie *rockchip)
+{
+ u32 val = rockchip_pcie_read(rockchip, PCIE_CLIENT_BASIC_STATUS1);
+
+ return PCIE_LINK_UP(val);
+}
+
+static void rockchip_pcie_ep_link_training(struct work_struct *work)
+{
+ struct rockchip_pcie_ep *ep =
+ container_of(work, struct rockchip_pcie_ep, link_training.work);
+ struct rockchip_pcie *rockchip = &ep->rockchip;
+ struct device *dev = rockchip->dev;
+ u32 val;
+ int ret;
+
+ /* Enable Gen1 training and wait for its completion */
+ ret = readl_poll_timeout(rockchip->apb_base + PCIE_CORE_CTRL,
+ val, PCIE_LINK_TRAINING_DONE(val), 50,
+ LINK_TRAIN_TIMEOUT);
+ if (ret)
+ goto again;
+
+ /* Make sure that the link is up */
+ ret = readl_poll_timeout(rockchip->apb_base + PCIE_CLIENT_BASIC_STATUS1,
+ val, PCIE_LINK_UP(val), 50,
+ LINK_TRAIN_TIMEOUT);
+ if (ret)
+ goto again;
+
+ /*
+ * Check the current speed: if gen2 speed was requested and we are not
+ * at gen2 speed yet, retrain again for gen2.
+ */
+ val = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL);
+ if (!PCIE_LINK_IS_GEN2(val) && rockchip->link_gen == 2) {
+ /* Enable retrain for gen2 */
+ rockchip_pcie_ep_retrain_link(rockchip);
+ readl_poll_timeout(rockchip->apb_base + PCIE_CORE_CTRL,
+ val, PCIE_LINK_IS_GEN2(val), 50,
+ LINK_TRAIN_TIMEOUT);
+ }
+
+ /* Check again that the link is up */
+ if (!rockchip_pcie_ep_link_up(rockchip))
+ goto again;
+
+ /*
+ * If PERST# was asserted while polling the link, do not notify
+ * the function.
+ */
+ if (ep->perst_asserted)
+ return;
+
+ val = rockchip_pcie_read(rockchip, PCIE_CLIENT_BASIC_STATUS0);
+ dev_info(dev,
+ "link up (negotiated speed: %sGT/s, width: x%lu)\n",
+ (val & PCIE_CLIENT_NEG_LINK_SPEED) ? "5" : "2.5",
+ ((val & PCIE_CLIENT_NEG_LINK_WIDTH_MASK) >>
+ PCIE_CLIENT_NEG_LINK_WIDTH_SHIFT) << 1);
+
+ /* Notify the function */
+ pci_epc_linkup(ep->epc);
+ ep->link_up = true;
+
+ return;
+
+again:
+ schedule_delayed_work(&ep->link_training, msecs_to_jiffies(5));
+}
+
+static void rockchip_pcie_ep_perst_assert(struct rockchip_pcie_ep *ep)
+{
+ struct rockchip_pcie *rockchip = &ep->rockchip;
+
+ dev_dbg(rockchip->dev, "PERST# asserted, link down\n");
+
+ if (ep->perst_asserted)
+ return;
+
+ ep->perst_asserted = true;
+
+ cancel_delayed_work_sync(&ep->link_training);
+
+ if (ep->link_up) {
+ pci_epc_linkdown(ep->epc);
+ ep->link_up = false;
+ }
+}
+
+static void rockchip_pcie_ep_perst_deassert(struct rockchip_pcie_ep *ep)
+{
+ struct rockchip_pcie *rockchip = &ep->rockchip;
+
+ dev_dbg(rockchip->dev, "PERST# de-asserted, starting link training\n");
+
+ if (!ep->perst_asserted)
+ return;
+
+ ep->perst_asserted = false;
+
+ /* Enable link re-training */
+ rockchip_pcie_ep_retrain_link(rockchip);
+
+ /* Start link training */
+ schedule_delayed_work(&ep->link_training, 0);
+}
+
+static irqreturn_t rockchip_pcie_ep_perst_irq_thread(int irq, void *data)
+{
+ struct pci_epc *epc = data;
+ struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
+ struct rockchip_pcie *rockchip = &ep->rockchip;
+ u32 perst = gpiod_get_value(rockchip->perst_gpio);
+
+ if (perst)
+ rockchip_pcie_ep_perst_assert(ep);
+ else
+ rockchip_pcie_ep_perst_deassert(ep);
+
+ irq_set_irq_type(ep->perst_irq,
+ (perst ? IRQF_TRIGGER_HIGH : IRQF_TRIGGER_LOW));
+
+ return IRQ_HANDLED;
+}
+
+static int rockchip_pcie_ep_setup_irq(struct pci_epc *epc)
+{
+ struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
+ struct rockchip_pcie *rockchip = &ep->rockchip;
+ struct device *dev = rockchip->dev;
+ int ret;
+
+ if (!rockchip->perst_gpio)
+ return 0;
+
+ /* PCIe reset interrupt */
+ ep->perst_irq = gpiod_to_irq(rockchip->perst_gpio);
+ if (ep->perst_irq < 0) {
+ dev_err(dev,
+ "failed to get IRQ for PERST# GPIO: %d\n",
+ ep->perst_irq);
+
+ return ep->perst_irq;
+ }
+
+ /*
+ * The perst_gpio is active low, so when it is inactive on start, it
+ * is high and will trigger the perst_irq handler. So treat this initial
+ * IRQ as a dummy one by faking the host asserting PERST#.
+ */
+ ep->perst_asserted = true;
+ irq_set_status_flags(ep->perst_irq, IRQ_NOAUTOEN);
+ ret = devm_request_threaded_irq(dev, ep->perst_irq, NULL,
+ rockchip_pcie_ep_perst_irq_thread,
+ IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ "pcie-ep-perst", epc);
+ if (ret) {
+ dev_err(dev,
+ "failed to request IRQ for PERST# GPIO: %d\n",
+ ret);
+
+ return ret;
+ }
+
return 0;
}
static const struct pci_epc_features rockchip_pcie_epc_features = {
- .linkup_notifier = false,
+ .linkup_notifier = true,
.msi_capable = true,
- .msix_capable = false,
+ .intx_capable = true,
+ .align = ROCKCHIP_PCIE_AT_SIZE_ALIGN,
};
static const struct pci_epc_features*
@@ -518,17 +708,19 @@ static const struct pci_epc_ops rockchip_pcie_epc_ops = {
.write_header = rockchip_pcie_ep_write_header,
.set_bar = rockchip_pcie_ep_set_bar,
.clear_bar = rockchip_pcie_ep_clear_bar,
+ .align_addr = rockchip_pcie_ep_align_addr,
.map_addr = rockchip_pcie_ep_map_addr,
.unmap_addr = rockchip_pcie_ep_unmap_addr,
.set_msi = rockchip_pcie_ep_set_msi,
.get_msi = rockchip_pcie_ep_get_msi,
.raise_irq = rockchip_pcie_ep_raise_irq,
.start = rockchip_pcie_ep_start,
+ .stop = rockchip_pcie_ep_stop,
.get_features = rockchip_pcie_ep_get_features,
};
-static int rockchip_pcie_parse_ep_dt(struct rockchip_pcie *rockchip,
- struct rockchip_pcie_ep *ep)
+static int rockchip_pcie_ep_get_resources(struct rockchip_pcie *rockchip,
+ struct rockchip_pcie_ep *ep)
{
struct device *dev = rockchip->dev;
int err;
@@ -547,6 +739,8 @@ static int rockchip_pcie_parse_ep_dt(struct rockchip_pcie *rockchip,
if (err < 0 || ep->max_regions > MAX_REGION_LIMIT)
ep->max_regions = MAX_REGION_LIMIT;
+ ep->ob_region_map = 0;
+
err = of_property_read_u8(dev->of_node, "max-functions",
&ep->epc->max_functions);
if (err < 0)
@@ -560,13 +754,94 @@ static const struct of_device_id rockchip_pcie_ep_of_match[] = {
{},
};
+static int rockchip_pcie_ep_init_ob_mem(struct rockchip_pcie_ep *ep)
+{
+ struct rockchip_pcie *rockchip = &ep->rockchip;
+ struct device *dev = rockchip->dev;
+ struct pci_epc_mem_window *windows = NULL;
+ int err, i;
+
+ ep->ob_addr = devm_kcalloc(dev, ep->max_regions, sizeof(*ep->ob_addr),
+ GFP_KERNEL);
+
+ if (!ep->ob_addr)
+ return -ENOMEM;
+
+ windows = devm_kcalloc(dev, ep->max_regions,
+ sizeof(struct pci_epc_mem_window), GFP_KERNEL);
+ if (!windows)
+ return -ENOMEM;
+
+ for (i = 0; i < ep->max_regions; i++) {
+ windows[i].phys_base = rockchip->mem_res->start + (SZ_1M * i);
+ windows[i].size = SZ_1M;
+ windows[i].page_size = SZ_1M;
+ }
+ err = pci_epc_multi_mem_init(ep->epc, windows, ep->max_regions);
+ devm_kfree(dev, windows);
+
+ if (err < 0) {
+ dev_err(dev, "failed to initialize the memory space\n");
+ return err;
+ }
+
+ ep->irq_cpu_addr = pci_epc_mem_alloc_addr(ep->epc, &ep->irq_phys_addr,
+ SZ_1M);
+ if (!ep->irq_cpu_addr) {
+ dev_err(dev, "failed to reserve memory space for MSI\n");
+ err = -ENOMEM;
+ goto err_epc_mem_exit;
+ }
+
+ ep->irq_pci_addr = ROCKCHIP_PCIE_EP_DUMMY_IRQ_ADDR;
+
+ return 0;
+
+err_epc_mem_exit:
+ pci_epc_mem_exit(ep->epc);
+
+ return err;
+}
+
+static void rockchip_pcie_ep_exit_ob_mem(struct rockchip_pcie_ep *ep)
+{
+ pci_epc_mem_exit(ep->epc);
+}
+
+static void rockchip_pcie_ep_hide_broken_msix_cap(struct rockchip_pcie *rockchip)
+{
+ u32 cfg_msi, cfg_msix_cp;
+
+ /*
+ * MSI-X is not supported but the controller still advertises the MSI-X
+ * capability by default, which can lead to the Root Complex side
+ * allocating MSI-X vectors which cannot be used. Avoid this by skipping
+ * the MSI-X capability entry in the PCIe capabilities linked-list: get
+ * the next pointer from the MSI-X entry and set that in the MSI
+ * capability entry (which is the previous entry). This way the MSI-X
+ * entry is skipped (left out of the linked-list) and not advertised.
+ */
+ cfg_msi = rockchip_pcie_read(rockchip, PCIE_EP_CONFIG_BASE +
+ ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
+
+ cfg_msi &= ~ROCKCHIP_PCIE_EP_MSI_CP1_MASK;
+
+ cfg_msix_cp = rockchip_pcie_read(rockchip, PCIE_EP_CONFIG_BASE +
+ ROCKCHIP_PCIE_EP_MSIX_CAP_REG) &
+ ROCKCHIP_PCIE_EP_MSIX_CAP_CP_MASK;
+
+ cfg_msi |= cfg_msix_cp;
+
+ rockchip_pcie_write(rockchip, cfg_msi,
+ PCIE_EP_CONFIG_BASE + ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
+}
+
static int rockchip_pcie_ep_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct rockchip_pcie_ep *ep;
struct rockchip_pcie *rockchip;
struct pci_epc *epc;
- size_t max_regions;
int err;
ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
@@ -576,68 +851,51 @@ static int rockchip_pcie_ep_probe(struct platform_device *pdev)
rockchip = &ep->rockchip;
rockchip->is_rc = false;
rockchip->dev = dev;
+ INIT_DELAYED_WORK(&ep->link_training, rockchip_pcie_ep_link_training);
epc = devm_pci_epc_create(dev, &rockchip_pcie_epc_ops);
if (IS_ERR(epc)) {
- dev_err(dev, "failed to create epc device\n");
+ dev_err(dev, "failed to create EPC device\n");
return PTR_ERR(epc);
}
ep->epc = epc;
epc_set_drvdata(epc, ep);
- err = rockchip_pcie_parse_ep_dt(rockchip, ep);
+ err = rockchip_pcie_ep_get_resources(rockchip, ep);
if (err)
return err;
- err = rockchip_pcie_enable_clocks(rockchip);
+ err = rockchip_pcie_ep_init_ob_mem(ep);
if (err)
return err;
+ err = rockchip_pcie_enable_clocks(rockchip);
+ if (err)
+ goto err_exit_ob_mem;
+
err = rockchip_pcie_init_port(rockchip);
if (err)
goto err_disable_clocks;
- /* Establish the link automatically */
- rockchip_pcie_write(rockchip, PCIE_CLIENT_LINK_TRAIN_ENABLE,
- PCIE_CLIENT_CONFIG);
-
- max_regions = ep->max_regions;
- ep->ob_addr = devm_kcalloc(dev, max_regions, sizeof(*ep->ob_addr),
- GFP_KERNEL);
-
- if (!ep->ob_addr) {
- err = -ENOMEM;
- goto err_uninit_port;
- }
+ rockchip_pcie_ep_hide_broken_msix_cap(rockchip);
/* Only enable function 0 by default */
rockchip_pcie_write(rockchip, BIT(0), PCIE_CORE_PHY_FUNC_CFG);
- err = pci_epc_mem_init(epc, rockchip->mem_res->start,
- resource_size(rockchip->mem_res), PAGE_SIZE);
- if (err < 0) {
- dev_err(dev, "failed to initialize the memory space\n");
- goto err_uninit_port;
- }
-
- ep->irq_cpu_addr = pci_epc_mem_alloc_addr(epc, &ep->irq_phys_addr,
- SZ_128K);
- if (!ep->irq_cpu_addr) {
- dev_err(dev, "failed to reserve memory space for MSI\n");
- err = -ENOMEM;
- goto err_epc_mem_exit;
- }
+ pci_epc_init_notify(epc);
- ep->irq_pci_addr = ROCKCHIP_PCIE_EP_DUMMY_IRQ_ADDR;
+ err = rockchip_pcie_ep_setup_irq(epc);
+ if (err < 0)
+ goto err_uninit_port;
return 0;
-err_epc_mem_exit:
- pci_epc_mem_exit(epc);
err_uninit_port:
rockchip_pcie_deinit_phys(rockchip);
err_disable_clocks:
rockchip_pcie_disable_clocks(rockchip);
+err_exit_ob_mem:
+ rockchip_pcie_ep_exit_ob_mem(ep);
return err;
}
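/*
 * Aside, not part of the patch: rockchip_pcie_ep_ob_atu_num_bits() above
 * sizes an outbound window from the highest bit that differs between the
 * first and last PCI address of the mapping. A standalone sketch of the
 * arithmetic; the clamp bounds are assumptions standing in for the
 * driver's ROCKCHIP_PCIE_AT_{MIN,MAX}_NUM_BITS:
 */
#include <stdint.h>

#define AT_MIN_NUM_BITS	 8
#define AT_MAX_NUM_BITS	20

static int ob_atu_num_bits(uint64_t pci_addr, uint64_t size)
{
	/* assumes size >= 2 so the XOR is non-zero; e.g.
	 * pci_addr = 0x10000fff0, size = 0x20:
	 * 0x10000fff0 ^ 0x10001000f = 0x1ffff, fls64 = 17,
	 * so a 128 KiB window covers the 32-byte span */
	int bits = 64 - __builtin_clzll(pci_addr ^ (pci_addr + size - 1));

	if (bits < AT_MIN_NUM_BITS)
		bits = AT_MIN_NUM_BITS;
	if (bits > AT_MAX_NUM_BITS)
		bits = AT_MAX_NUM_BITS;
	return bits;
}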
diff --git a/drivers/pci/controller/pcie-rockchip-host.c b/drivers/pci/controller/pcie-rockchip-host.c
index c96c0f454570..ee1822ca01db 100644
--- a/drivers/pci/controller/pcie-rockchip-host.c
+++ b/drivers/pci/controller/pcie-rockchip-host.c
@@ -11,29 +11,19 @@
* ARM PCI Host generic driver.
*/
+#include <linux/bitfield.h>
#include <linux/bitrev.h>
-#include <linux/clk.h>
-#include <linux/delay.h>
#include <linux/gpio/consumer.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
-#include <linux/kernel.h>
-#include <linux/mfd/syscon.h>
#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_pci.h>
-#include <linux/of_platform.h>
-#include <linux/pci.h>
-#include <linux/pci_ids.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
-#include <linux/reset.h>
-#include <linux/regmap.h>
#include "../pci.h"
#include "pcie-rockchip.h"
@@ -42,18 +32,18 @@ static void rockchip_pcie_enable_bw_int(struct rockchip_pcie *rockchip)
{
u32 status;
- status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL);
status |= (PCI_EXP_LNKCTL_LBMIE | PCI_EXP_LNKCTL_LABIE);
- rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL);
}
static void rockchip_pcie_clr_bw_int(struct rockchip_pcie *rockchip)
{
u32 status;
- status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL);
status |= (PCI_EXP_LNKSTA_LBMS | PCI_EXP_LNKSTA_LABS) << 16;
- rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL);
}
static void rockchip_pcie_update_txcredit_mui(struct rockchip_pcie *rockchip)
@@ -271,7 +261,7 @@ static void rockchip_pcie_set_power_limit(struct rockchip_pcie *rockchip)
scale = 3; /* 0.001x */
curr = curr / 1000; /* convert to mA */
power = (curr * 3300) / 1000; /* milliwatt */
- while (power > PCIE_RC_CONFIG_DCR_CSPL_LIMIT) {
+ while (power > FIELD_MAX(PCI_EXP_DEVCAP_PWR_VAL)) {
if (!scale) {
dev_warn(rockchip->dev, "invalid power supply\n");
return;
@@ -280,10 +270,10 @@ static void rockchip_pcie_set_power_limit(struct rockchip_pcie *rockchip)
power = power / 10;
}
- status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCR);
- status |= (power << PCIE_RC_CONFIG_DCR_CSPL_SHIFT) |
- (scale << PCIE_RC_CONFIG_DCR_CPLS_SHIFT);
- rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCR);
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_CR + PCI_EXP_DEVCAP);
+ status |= FIELD_PREP(PCI_EXP_DEVCAP_PWR_VAL, power);
+ status |= FIELD_PREP(PCI_EXP_DEVCAP_PWR_SCL, scale);
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_CR + PCI_EXP_DEVCAP);
}
/**
@@ -296,7 +286,7 @@ static int rockchip_pcie_host_init_port(struct rockchip_pcie *rockchip)
int err, i = MAX_LANE_NUM;
u32 status;
- gpiod_set_value_cansleep(rockchip->ep_gpio, 0);
+ gpiod_set_value_cansleep(rockchip->perst_gpio, 0);
err = rockchip_pcie_init_port(rockchip);
if (err)
@@ -311,20 +301,23 @@ static int rockchip_pcie_host_init_port(struct rockchip_pcie *rockchip)
rockchip_pcie_set_power_limit(rockchip);
/* Set RC's clock architecture as common clock */
- status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL);
status |= PCI_EXP_LNKSTA_SLC << 16;
- rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL);
/* Set RC's RCB to 128 */
- status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL);
status |= PCI_EXP_LNKCTL_RCB;
- rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL);
/* Enable Gen1 training */
rockchip_pcie_write(rockchip, PCIE_CLIENT_LINK_TRAIN_ENABLE,
PCIE_CLIENT_CONFIG);
- gpiod_set_value_cansleep(rockchip->ep_gpio, 1);
+ msleep(PCIE_T_PVPERL_MS);
+ gpiod_set_value_cansleep(rockchip->perst_gpio, 1);
+
+ msleep(PCIE_RESET_CONFIG_WAIT_MS);
/* 500ms timeout value should be enough for Gen1/2 training */
err = readl_poll_timeout(rockchip->apb_base + PCIE_CLIENT_BASIC_STATUS1,
@@ -340,9 +333,13 @@ static int rockchip_pcie_host_init_port(struct rockchip_pcie *rockchip)
* Enable retrain for gen2. This should be configured only after
* gen1 finished.
*/
- status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL2);
+ status &= ~PCI_EXP_LNKCTL2_TLS;
+ status |= PCI_EXP_LNKCTL2_TLS_5_0GT;
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL2);
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL);
status |= PCI_EXP_LNKCTL_RL;
- rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL);
err = readl_poll_timeout(rockchip->apb_base + PCIE_CORE_CTRL,
status, PCIE_LINK_IS_GEN2(status), 20,
@@ -366,7 +363,7 @@ static int rockchip_pcie_host_init_port(struct rockchip_pcie *rockchip)
}
}
- rockchip_pcie_write(rockchip, ROCKCHIP_VENDOR_ID,
+ rockchip_pcie_write(rockchip, PCI_VENDOR_ID_ROCKCHIP,
PCIE_CORE_CONFIG_VENDOR);
rockchip_pcie_write(rockchip,
PCI_CLASS_BRIDGE_PCI_NORMAL << 8,
@@ -379,15 +376,15 @@ static int rockchip_pcie_host_init_port(struct rockchip_pcie *rockchip)
/* Clear L0s from RC's link cap */
if (of_property_read_bool(dev->of_node, "aspm-no-l0s")) {
- status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LINK_CAP);
- status &= ~PCIE_RC_CONFIG_LINK_CAP_L0S;
- rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LINK_CAP);
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCAP);
+ status &= ~PCI_EXP_LNKCAP_ASPM_L0S;
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCAP);
}
- status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCSR);
- status &= ~PCIE_RC_CONFIG_DCSR_MPS_MASK;
- status |= PCIE_RC_CONFIG_DCSR_MPS_256;
- rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCSR);
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_CR + PCI_EXP_DEVCTL);
+ status &= ~PCI_EXP_DEVCTL_PAYLOAD;
+ status |= PCI_EXP_DEVCTL_PAYLOAD_256B;
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_CR + PCI_EXP_DEVCTL);
return 0;
err_power_off_phy:
@@ -438,7 +435,7 @@ static irqreturn_t rockchip_pcie_subsys_irq_handler(int irq, void *arg)
dev_dbg(dev, "malformed TLP received from the link\n");
if (sub_reg & PCIE_CORE_INT_UCR)
- dev_dbg(dev, "malformed TLP received from the link\n");
+ dev_dbg(dev, "Unexpected Completion received from the link\n");
if (sub_reg & PCIE_CORE_INT_FCE)
dev_dbg(dev, "an error was observed in the flow control advertisements from the other side\n");
@@ -488,7 +485,7 @@ static irqreturn_t rockchip_pcie_client_irq_handler(int irq, void *arg)
dev_dbg(dev, "fatal error interrupt received\n");
if (reg & PCIE_CLIENT_INT_NFATAL_ERR)
- dev_dbg(dev, "no fatal error interrupt received\n");
+ dev_dbg(dev, "non fatal error interrupt received\n");
if (reg & PCIE_CLIENT_INT_CORR_ERR)
dev_dbg(dev, "correctable error interrupt received\n");
@@ -507,7 +504,7 @@ static irqreturn_t rockchip_pcie_client_irq_handler(int irq, void *arg)
return IRQ_HANDLED;
}
-static void rockchip_pcie_legacy_int_handler(struct irq_desc *desc)
+static void rockchip_pcie_intx_handler(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
struct rockchip_pcie *rockchip = irq_desc_get_handler_data(desc);
@@ -555,7 +552,7 @@ static int rockchip_pcie_setup_irq(struct rockchip_pcie *rockchip)
return irq;
irq_set_chained_handler_and_data(irq,
- rockchip_pcie_legacy_int_handler,
+ rockchip_pcie_intx_handler,
rockchip);
irq = platform_get_irq_byname(pdev, "client");
@@ -692,8 +689,8 @@ static int rockchip_pcie_init_irq_domain(struct rockchip_pcie *rockchip)
return -EINVAL;
}
- rockchip->irq_domain = irq_domain_add_linear(intc, PCI_NUM_INTX,
- &intx_domain_ops, rockchip);
+ rockchip->irq_domain = irq_domain_create_linear(of_fwnode_handle(intc), PCI_NUM_INTX,
+ &intx_domain_ops, rockchip);
of_node_put(intc);
if (!rockchip->irq_domain) {
dev_err(dev, "failed to get a INTx IRQ domain\n");
@@ -1009,7 +1006,7 @@ err_set_vpcie:
return err;
}
-static int rockchip_pcie_remove(struct platform_device *pdev)
+static void rockchip_pcie_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
@@ -1029,8 +1026,6 @@ static int rockchip_pcie_remove(struct platform_device *pdev)
regulator_disable(rockchip->vpcie3v3);
regulator_disable(rockchip->vpcie1v8);
regulator_disable(rockchip->vpcie0v9);
-
- return 0;
}
static const struct dev_pm_ops rockchip_pcie_pm_ops = {
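/*
 * Aside, not part of the patch: the power-limit hunk above trades
 * hand-rolled shift/mask macros for FIELD_PREP()/FIELD_MAX() from
 * <linux/bitfield.h>, which derive the shift from the mask itself. A
 * minimal sketch using the standard DEVCAP slot power limit fields;
 * scale code 1 means 0.1x, so the value is milliwatts / 100:
 */
#include <linux/bitfield.h>
#include <linux/pci_regs.h>
#include <linux/types.h>

static u32 devcap_set_power_limit(u32 devcap, unsigned int milliwatt)
{
	/* FIELD_MAX(PCI_EXP_DEVCAP_PWR_VAL) is 255, the largest value
	 * the 8-bit Captured Slot Power Limit Value field can hold */
	devcap |= FIELD_PREP(PCI_EXP_DEVCAP_PWR_VAL, milliwatt / 100);
	devcap |= FIELD_PREP(PCI_EXP_DEVCAP_PWR_SCL, 1);  /* 0.1x scale */

	return devcap;
}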
diff --git a/drivers/pci/controller/pcie-rockchip.c b/drivers/pci/controller/pcie-rockchip.c
index 990a00e08bc5..0f88da378805 100644
--- a/drivers/pci/controller/pcie-rockchip.c
+++ b/drivers/pci/controller/pcie-rockchip.c
@@ -14,6 +14,8 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
+#include <linux/iopoll.h>
+#include <linux/of.h>
#include <linux/of_pci.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
@@ -28,7 +30,7 @@ int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip)
struct platform_device *pdev = to_platform_device(dev);
struct device_node *node = dev->of_node;
struct resource *regs;
- int err;
+ int err, i;
if (rockchip->is_rc) {
regs = platform_get_resource_byname(pdev,
@@ -67,115 +69,59 @@ int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip)
if (rockchip->link_gen < 0 || rockchip->link_gen > 2)
rockchip->link_gen = 2;
- rockchip->core_rst = devm_reset_control_get_exclusive(dev, "core");
- if (IS_ERR(rockchip->core_rst)) {
- if (PTR_ERR(rockchip->core_rst) != -EPROBE_DEFER)
- dev_err(dev, "missing core reset property in node\n");
- return PTR_ERR(rockchip->core_rst);
- }
-
- rockchip->mgmt_rst = devm_reset_control_get_exclusive(dev, "mgmt");
- if (IS_ERR(rockchip->mgmt_rst)) {
- if (PTR_ERR(rockchip->mgmt_rst) != -EPROBE_DEFER)
- dev_err(dev, "missing mgmt reset property in node\n");
- return PTR_ERR(rockchip->mgmt_rst);
- }
-
- rockchip->mgmt_sticky_rst = devm_reset_control_get_exclusive(dev,
- "mgmt-sticky");
- if (IS_ERR(rockchip->mgmt_sticky_rst)) {
- if (PTR_ERR(rockchip->mgmt_sticky_rst) != -EPROBE_DEFER)
- dev_err(dev, "missing mgmt-sticky reset property in node\n");
- return PTR_ERR(rockchip->mgmt_sticky_rst);
- }
-
- rockchip->pipe_rst = devm_reset_control_get_exclusive(dev, "pipe");
- if (IS_ERR(rockchip->pipe_rst)) {
- if (PTR_ERR(rockchip->pipe_rst) != -EPROBE_DEFER)
- dev_err(dev, "missing pipe reset property in node\n");
- return PTR_ERR(rockchip->pipe_rst);
- }
-
- rockchip->pm_rst = devm_reset_control_get_exclusive(dev, "pm");
- if (IS_ERR(rockchip->pm_rst)) {
- if (PTR_ERR(rockchip->pm_rst) != -EPROBE_DEFER)
- dev_err(dev, "missing pm reset property in node\n");
- return PTR_ERR(rockchip->pm_rst);
- }
+ for (i = 0; i < ROCKCHIP_NUM_PM_RSTS; i++)
+ rockchip->pm_rsts[i].id = rockchip_pci_pm_rsts[i];
- rockchip->pclk_rst = devm_reset_control_get_exclusive(dev, "pclk");
- if (IS_ERR(rockchip->pclk_rst)) {
- if (PTR_ERR(rockchip->pclk_rst) != -EPROBE_DEFER)
- dev_err(dev, "missing pclk reset property in node\n");
- return PTR_ERR(rockchip->pclk_rst);
- }
-
- rockchip->aclk_rst = devm_reset_control_get_exclusive(dev, "aclk");
- if (IS_ERR(rockchip->aclk_rst)) {
- if (PTR_ERR(rockchip->aclk_rst) != -EPROBE_DEFER)
- dev_err(dev, "missing aclk reset property in node\n");
- return PTR_ERR(rockchip->aclk_rst);
- }
-
- if (rockchip->is_rc) {
- rockchip->ep_gpio = devm_gpiod_get_optional(dev, "ep",
- GPIOD_OUT_HIGH);
- if (IS_ERR(rockchip->ep_gpio))
- return dev_err_probe(dev, PTR_ERR(rockchip->ep_gpio),
- "failed to get ep GPIO\n");
- }
+ err = devm_reset_control_bulk_get_exclusive(dev,
+ ROCKCHIP_NUM_PM_RSTS,
+ rockchip->pm_rsts);
+ if (err)
+ return dev_err_probe(dev, err, "Cannot get the PM reset\n");
- rockchip->aclk_pcie = devm_clk_get(dev, "aclk");
- if (IS_ERR(rockchip->aclk_pcie)) {
- dev_err(dev, "aclk clock not found\n");
- return PTR_ERR(rockchip->aclk_pcie);
- }
+ for (i = 0; i < ROCKCHIP_NUM_CORE_RSTS; i++)
+ rockchip->core_rsts[i].id = rockchip_pci_core_rsts[i];
- rockchip->aclk_perf_pcie = devm_clk_get(dev, "aclk-perf");
- if (IS_ERR(rockchip->aclk_perf_pcie)) {
- dev_err(dev, "aclk_perf clock not found\n");
- return PTR_ERR(rockchip->aclk_perf_pcie);
- }
+ err = devm_reset_control_bulk_get_exclusive(dev,
+ ROCKCHIP_NUM_CORE_RSTS,
+ rockchip->core_rsts);
+ if (err)
+ return dev_err_probe(dev, err, "Cannot get the Core resets\n");
- rockchip->hclk_pcie = devm_clk_get(dev, "hclk");
- if (IS_ERR(rockchip->hclk_pcie)) {
- dev_err(dev, "hclk clock not found\n");
- return PTR_ERR(rockchip->hclk_pcie);
- }
+ if (rockchip->is_rc)
+ rockchip->perst_gpio = devm_gpiod_get_optional(dev, "ep",
+ GPIOD_OUT_LOW);
+ else
+ rockchip->perst_gpio = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_IN);
+ if (IS_ERR(rockchip->perst_gpio))
+ return dev_err_probe(dev, PTR_ERR(rockchip->perst_gpio),
+ "failed to get PERST# GPIO\n");
- rockchip->clk_pcie_pm = devm_clk_get(dev, "pm");
- if (IS_ERR(rockchip->clk_pcie_pm)) {
- dev_err(dev, "pm clock not found\n");
- return PTR_ERR(rockchip->clk_pcie_pm);
- }
+ rockchip->num_clks = devm_clk_bulk_get_all(dev, &rockchip->clks);
+ if (rockchip->num_clks < 0)
+ return dev_err_probe(dev, rockchip->num_clks,
+ "failed to get clocks\n");
return 0;
}
EXPORT_SYMBOL_GPL(rockchip_pcie_parse_dt);
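
The reset handling above now follows the reset_control_bulk pattern: fill in only the .id of each reset_control_bulk_data entry, fetch the whole set once, then assert/deassert them as a group. A minimal self-contained sketch of that pattern (the demo_* names and "foo"/"bar" reset lines are hypothetical, not from this patch):

#include <linux/device.h>
#include <linux/reset.h>

static const char * const demo_rsts[] = { "foo", "bar" };

struct demo_priv {
	struct reset_control_bulk_data rsts[ARRAY_SIZE(demo_rsts)];
};

static int demo_get_resets(struct device *dev, struct demo_priv *priv)
{
	int i, err;

	/* Only .id needs filling in; the core resolves each line by name */
	for (i = 0; i < ARRAY_SIZE(demo_rsts); i++)
		priv->rsts[i].id = demo_rsts[i];

	err = devm_reset_control_bulk_get_exclusive(dev, ARRAY_SIZE(demo_rsts),
						    priv->rsts);
	if (err)
		return dev_err_probe(dev, err, "cannot get resets\n");

	/* Assert/deassert then operate on the whole array, in order */
	return reset_control_bulk_assert(ARRAY_SIZE(demo_rsts), priv->rsts);
}
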
+#define rockchip_pcie_read_addr(addr) rockchip_pcie_read(rockchip, addr)
+/* 100 ms max wait time for PHY PLLs to lock */
+#define RK_PHY_PLL_LOCK_TIMEOUT_US 100000
+/* Sleep should be less than 20ms */
+#define RK_PHY_PLL_LOCK_SLEEP_US 1000
+
int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
{
struct device *dev = rockchip->dev;
int err, i;
u32 regs;
- err = reset_control_assert(rockchip->aclk_rst);
- if (err) {
- dev_err(dev, "assert aclk_rst err %d\n", err);
- return err;
- }
-
- err = reset_control_assert(rockchip->pclk_rst);
- if (err) {
- dev_err(dev, "assert pclk_rst err %d\n", err);
- return err;
- }
-
- err = reset_control_assert(rockchip->pm_rst);
- if (err) {
- dev_err(dev, "assert pm_rst err %d\n", err);
- return err;
- }
+ err = reset_control_bulk_assert(ROCKCHIP_NUM_PM_RSTS,
+ rockchip->pm_rsts);
+ if (err)
+ return dev_err_probe(dev, err, "Couldn't assert PM resets\n");
for (i = 0; i < MAX_LANE_NUM; i++) {
err = phy_init(rockchip->phys[i]);
@@ -185,47 +131,19 @@ int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
}
}
- err = reset_control_assert(rockchip->core_rst);
+ err = reset_control_bulk_assert(ROCKCHIP_NUM_CORE_RSTS,
+ rockchip->core_rsts);
if (err) {
- dev_err(dev, "assert core_rst err %d\n", err);
- goto err_exit_phy;
- }
-
- err = reset_control_assert(rockchip->mgmt_rst);
- if (err) {
- dev_err(dev, "assert mgmt_rst err %d\n", err);
- goto err_exit_phy;
- }
-
- err = reset_control_assert(rockchip->mgmt_sticky_rst);
- if (err) {
- dev_err(dev, "assert mgmt_sticky_rst err %d\n", err);
- goto err_exit_phy;
- }
-
- err = reset_control_assert(rockchip->pipe_rst);
- if (err) {
- dev_err(dev, "assert pipe_rst err %d\n", err);
+ dev_err_probe(dev, err, "Couldn't assert Core resets\n");
goto err_exit_phy;
}
udelay(10);
- err = reset_control_deassert(rockchip->pm_rst);
- if (err) {
- dev_err(dev, "deassert pm_rst err %d\n", err);
- goto err_exit_phy;
- }
-
- err = reset_control_deassert(rockchip->aclk_rst);
- if (err) {
- dev_err(dev, "deassert aclk_rst err %d\n", err);
- goto err_exit_phy;
- }
-
- err = reset_control_deassert(rockchip->pclk_rst);
+ err = reset_control_bulk_deassert(ROCKCHIP_NUM_PM_RSTS,
+ rockchip->pm_rsts);
if (err) {
- dev_err(dev, "deassert pclk_rst err %d\n", err);
+ dev_err(dev, "Couldn't deassert PM resets %d\n", err);
goto err_exit_phy;
}
@@ -236,11 +154,12 @@ int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
rockchip_pcie_write(rockchip, PCIE_CLIENT_GEN_SEL_1,
PCIE_CLIENT_CONFIG);
- regs = PCIE_CLIENT_LINK_TRAIN_ENABLE | PCIE_CLIENT_ARI_ENABLE |
+ regs = PCIE_CLIENT_ARI_ENABLE |
PCIE_CLIENT_CONF_LANE_NUM(rockchip->lanes);
if (rockchip->is_rc)
- regs |= PCIE_CLIENT_CONF_ENABLE | PCIE_CLIENT_MODE_RC;
+ regs |= PCIE_CLIENT_LINK_TRAIN_ENABLE |
+ PCIE_CLIENT_CONF_ENABLE | PCIE_CLIENT_MODE_RC;
else
regs |= PCIE_CLIENT_CONF_DISABLE | PCIE_CLIENT_MODE_EP;
@@ -254,31 +173,20 @@ int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
}
}
- /*
- * Please don't reorder the deassert sequence of the following
- * four reset pins.
- */
- err = reset_control_deassert(rockchip->mgmt_sticky_rst);
- if (err) {
- dev_err(dev, "deassert mgmt_sticky_rst err %d\n", err);
- goto err_power_off_phy;
- }
-
- err = reset_control_deassert(rockchip->core_rst);
- if (err) {
- dev_err(dev, "deassert core_rst err %d\n", err);
- goto err_power_off_phy;
- }
-
- err = reset_control_deassert(rockchip->mgmt_rst);
+ err = readx_poll_timeout(rockchip_pcie_read_addr,
+ PCIE_CLIENT_SIDE_BAND_STATUS,
+ regs, !(regs & PCIE_CLIENT_PHY_ST),
+ RK_PHY_PLL_LOCK_SLEEP_US,
+ RK_PHY_PLL_LOCK_TIMEOUT_US);
if (err) {
- dev_err(dev, "deassert mgmt_rst err %d\n", err);
+ dev_err(dev, "PHY PLLs could not lock, %d\n", err);
goto err_power_off_phy;
}
- err = reset_control_deassert(rockchip->pipe_rst);
+ err = reset_control_bulk_deassert(ROCKCHIP_NUM_CORE_RSTS,
+ rockchip->core_rsts);
if (err) {
- dev_err(dev, "deassert pipe_rst err %d\n", err);
+ dev_err(dev, "Couldn't deassert Core reset %d\n", err);
goto err_power_off_phy;
}
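
readx_poll_timeout() calls its read op as op(addr), i.e. with a single argument, which is why the patch adds the rockchip_pcie_read_addr() wrapper macro before rockchip_pcie_init_port(): it binds the rockchip pointer from the calling scope. A hedged, self-contained sketch of the same trick (demo_* names are hypothetical; the 0x20/BIT(12) values mirror PCIE_CLIENT_SIDE_BAND_STATUS/PCIE_CLIENT_PHY_ST from the patch):

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>

struct demo_priv { void __iomem *base; };

static u32 demo_read(struct demo_priv *priv, u32 reg)
{
	return readl(priv->base + reg);
}

/* Bind 'priv' from the calling scope so the op matches op(addr) */
#define demo_read_addr(addr)	demo_read(priv, addr)

static int demo_wait_phy(struct demo_priv *priv)
{
	u32 val;

	/* Poll every 1 ms, give up after 100 ms (-ETIMEDOUT) */
	return readx_poll_timeout(demo_read_addr, 0x20, val,
				  !(val & BIT(12)), 1000, 100000);
}
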
@@ -354,50 +262,18 @@ int rockchip_pcie_enable_clocks(struct rockchip_pcie *rockchip)
struct device *dev = rockchip->dev;
int err;
- err = clk_prepare_enable(rockchip->aclk_pcie);
- if (err) {
- dev_err(dev, "unable to enable aclk_pcie clock\n");
- return err;
- }
-
- err = clk_prepare_enable(rockchip->aclk_perf_pcie);
- if (err) {
- dev_err(dev, "unable to enable aclk_perf_pcie clock\n");
- goto err_aclk_perf_pcie;
- }
-
- err = clk_prepare_enable(rockchip->hclk_pcie);
- if (err) {
- dev_err(dev, "unable to enable hclk_pcie clock\n");
- goto err_hclk_pcie;
- }
-
- err = clk_prepare_enable(rockchip->clk_pcie_pm);
- if (err) {
- dev_err(dev, "unable to enable clk_pcie_pm clock\n");
- goto err_clk_pcie_pm;
- }
+ err = clk_bulk_prepare_enable(rockchip->num_clks, rockchip->clks);
+ if (err)
+ return dev_err_probe(dev, err, "failed to enable clocks\n");
return 0;
-
-err_clk_pcie_pm:
- clk_disable_unprepare(rockchip->hclk_pcie);
-err_hclk_pcie:
- clk_disable_unprepare(rockchip->aclk_perf_pcie);
-err_aclk_perf_pcie:
- clk_disable_unprepare(rockchip->aclk_pcie);
- return err;
}
EXPORT_SYMBOL_GPL(rockchip_pcie_enable_clocks);
-void rockchip_pcie_disable_clocks(void *data)
+void rockchip_pcie_disable_clocks(struct rockchip_pcie *rockchip)
{
- struct rockchip_pcie *rockchip = data;
- clk_disable_unprepare(rockchip->clk_pcie_pm);
- clk_disable_unprepare(rockchip->hclk_pcie);
- clk_disable_unprepare(rockchip->aclk_perf_pcie);
- clk_disable_unprepare(rockchip->aclk_pcie);
+ clk_bulk_disable_unprepare(rockchip->num_clks, rockchip->clks);
}
EXPORT_SYMBOL_GPL(rockchip_pcie_disable_clocks);
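
The clock handling is the same story: devm_clk_bulk_get_all() pulls in every clock listed in the consumer's DT node and returns the count, so the driver no longer hard-codes the four clock names. A minimal sketch of the pattern (demo_* names hypothetical):

#include <linux/clk.h>
#include <linux/device.h>

struct demo_priv {
	struct clk_bulk_data *clks;
	int num_clks;
};

static int demo_enable_clocks(struct device *dev, struct demo_priv *priv)
{
	int err;

	/* Returns the number of clocks found, or a negative errno */
	priv->num_clks = devm_clk_bulk_get_all(dev, &priv->clks);
	if (priv->num_clks < 0)
		return dev_err_probe(dev, priv->num_clks,
				     "failed to get clocks\n");

	/* Prepares and enables the whole set; unwinds on failure */
	err = clk_bulk_prepare_enable(priv->num_clks, priv->clks);
	if (err)
		return dev_err_probe(dev, err, "failed to enable clocks\n");

	return 0;
}
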
diff --git a/drivers/pci/controller/pcie-rockchip.h b/drivers/pci/controller/pcie-rockchip.h
index 32c3a859c26b..3e82a69b9c00 100644
--- a/drivers/pci/controller/pcie-rockchip.h
+++ b/drivers/pci/controller/pcie-rockchip.h
@@ -11,37 +11,53 @@
#ifndef _PCIE_ROCKCHIP_H
#define _PCIE_ROCKCHIP_H
+#include <linux/clk.h>
+#include <linux/hw_bitfield.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/pci-ecam.h>
+#include <linux/reset.h>
/*
* The upper 16 bits of PCIE_CLIENT_CONFIG are a write mask for the lower 16
* bits. This allows atomic updates of the register without locking.
*/
-#define HIWORD_UPDATE(mask, val) (((mask) << 16) | (val))
-#define HIWORD_UPDATE_BIT(val) HIWORD_UPDATE(val, val)
+#define HWORD_SET_BIT(val) (FIELD_PREP_WM16_CONST((val), 1))
+#define HWORD_CLR_BIT(val) (FIELD_PREP_WM16_CONST((val), 0))
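
To make the write-mask convention concrete, here is how the two helpers expand for bit 1 (the LINK_TRAIN bit defined below); values worked out from FIELD_PREP_WM16_CONST():

/*
 * HWORD_SET_BIT(0x0002) -> 0x00020002  (high half: mask bit 1; low half: 1)
 * HWORD_CLR_BIT(0x0002) -> 0x00020000  (high half: mask bit 1; low half: 0)
 *
 * Bits whose mask half is 0 are left untouched by the hardware, so
 * single-bit updates need neither read-modify-write nor a lock.
 */
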
-#define ENCODE_LANES(x) ((((x) >> 1) & 3) << 4)
+#define ENCODE_LANES(x) ((((x) >> 1) & 3))
#define MAX_LANE_NUM 4
#define MAX_REGION_LIMIT 32
#define MIN_EP_APERTURE 28
+#define LINK_TRAIN_TIMEOUT (500 * USEC_PER_MSEC)
#define PCIE_CLIENT_BASE 0x0
#define PCIE_CLIENT_CONFIG (PCIE_CLIENT_BASE + 0x00)
-#define PCIE_CLIENT_CONF_ENABLE HIWORD_UPDATE_BIT(0x0001)
-#define PCIE_CLIENT_CONF_DISABLE HIWORD_UPDATE(0x0001, 0)
-#define PCIE_CLIENT_LINK_TRAIN_ENABLE HIWORD_UPDATE_BIT(0x0002)
-#define PCIE_CLIENT_ARI_ENABLE HIWORD_UPDATE_BIT(0x0008)
-#define PCIE_CLIENT_CONF_LANE_NUM(x) HIWORD_UPDATE(0x0030, ENCODE_LANES(x))
-#define PCIE_CLIENT_MODE_RC HIWORD_UPDATE_BIT(0x0040)
-#define PCIE_CLIENT_MODE_EP HIWORD_UPDATE(0x0040, 0)
-#define PCIE_CLIENT_GEN_SEL_1 HIWORD_UPDATE(0x0080, 0)
-#define PCIE_CLIENT_GEN_SEL_2 HIWORD_UPDATE_BIT(0x0080)
+#define PCIE_CLIENT_CONF_ENABLE HWORD_SET_BIT(0x0001)
+#define PCIE_CLIENT_CONF_DISABLE HWORD_CLR_BIT(0x0001)
+#define PCIE_CLIENT_LINK_TRAIN_ENABLE HWORD_SET_BIT(0x0002)
+#define PCIE_CLIENT_LINK_TRAIN_DISABLE HWORD_CLR_BIT(0x0002)
+#define PCIE_CLIENT_ARI_ENABLE HWORD_SET_BIT(0x0008)
+#define PCIE_CLIENT_CONF_LANE_NUM(x) FIELD_PREP_WM16(0x0030, ENCODE_LANES(x))
+#define PCIE_CLIENT_MODE_RC HWORD_SET_BIT(0x0040)
+#define PCIE_CLIENT_MODE_EP HWORD_CLR_BIT(0x0040)
+#define PCIE_CLIENT_GEN_SEL_1 HWORD_CLR_BIT(0x0080)
+#define PCIE_CLIENT_GEN_SEL_2 HWORD_SET_BIT(0x0080)
+#define PCIE_CLIENT_LEGACY_INT_CTRL (PCIE_CLIENT_BASE + 0x0c)
+#define PCIE_CLIENT_INT_IN_ASSERT HWORD_SET_BIT(0x0002)
+#define PCIE_CLIENT_INT_IN_DEASSERT HWORD_CLR_BIT(0x0002)
+#define PCIE_CLIENT_INT_PEND_ST_PEND HWORD_SET_BIT(0x0001)
+#define PCIE_CLIENT_INT_PEND_ST_NORMAL HWORD_CLR_BIT(0x0001)
+#define PCIE_CLIENT_SIDE_BAND_STATUS (PCIE_CLIENT_BASE + 0x20)
+#define PCIE_CLIENT_PHY_ST BIT(12)
#define PCIE_CLIENT_DEBUG_OUT_0 (PCIE_CLIENT_BASE + 0x3c)
#define PCIE_CLIENT_DEBUG_LTSSM_MASK GENMASK(5, 0)
#define PCIE_CLIENT_DEBUG_LTSSM_L1 0x18
#define PCIE_CLIENT_DEBUG_LTSSM_L2 0x19
+#define PCIE_CLIENT_BASIC_STATUS0 (PCIE_CLIENT_BASE + 0x44)
+#define PCIE_CLIENT_NEG_LINK_WIDTH_MASK GENMASK(7, 6)
+#define PCIE_CLIENT_NEG_LINK_WIDTH_SHIFT 6
+#define PCIE_CLIENT_NEG_LINK_SPEED BIT(5)
#define PCIE_CLIENT_BASIC_STATUS1 (PCIE_CLIENT_BASE + 0x48)
#define PCIE_CLIENT_LINK_STATUS_UP 0x00300000
#define PCIE_CLIENT_LINK_STATUS_MASK 0x00300000
@@ -79,6 +95,8 @@
#define PCIE_CORE_CTRL_MGMT_BASE 0x900000
#define PCIE_CORE_CTRL (PCIE_CORE_CTRL_MGMT_BASE + 0x000)
+#define PCIE_CORE_PL_CONF_LS_MASK 0x00000001
+#define PCIE_CORE_PL_CONF_LS_READY 0x00000001
#define PCIE_CORE_PL_CONF_SPEED_5G 0x00000008
#define PCIE_CORE_PL_CONF_SPEED_MASK 0x00000018
#define PCIE_CORE_PL_CONF_LANE_MASK 0x00000006
@@ -132,26 +150,24 @@
#define PCIE_RC_RP_ATS_BASE 0x400000
#define PCIE_RC_CONFIG_NORMAL_BASE 0x800000
+#define PCIE_EP_PF_CONFIG_REGS_BASE 0x800000
#define PCIE_RC_CONFIG_BASE 0xa00000
+#define PCIE_EP_CONFIG_BASE 0xa00000
+#define PCIE_EP_CONFIG_DID_VID (PCIE_EP_CONFIG_BASE + 0x00)
+#define PCIE_EP_CONFIG_LCS (PCIE_EP_CONFIG_BASE + 0xd0)
#define PCIE_RC_CONFIG_RID_CCR (PCIE_RC_CONFIG_BASE + 0x08)
-#define PCIE_RC_CONFIG_DCR (PCIE_RC_CONFIG_BASE + 0xc4)
-#define PCIE_RC_CONFIG_DCR_CSPL_SHIFT 18
-#define PCIE_RC_CONFIG_DCR_CSPL_LIMIT 0xff
-#define PCIE_RC_CONFIG_DCR_CPLS_SHIFT 26
-#define PCIE_RC_CONFIG_DCSR (PCIE_RC_CONFIG_BASE + 0xc8)
-#define PCIE_RC_CONFIG_DCSR_MPS_MASK GENMASK(7, 5)
-#define PCIE_RC_CONFIG_DCSR_MPS_256 (0x1 << 5)
-#define PCIE_RC_CONFIG_LINK_CAP (PCIE_RC_CONFIG_BASE + 0xcc)
-#define PCIE_RC_CONFIG_LINK_CAP_L0S BIT(10)
-#define PCIE_RC_CONFIG_LCS (PCIE_RC_CONFIG_BASE + 0xd0)
+#define PCIE_RC_CONFIG_CR (PCIE_RC_CONFIG_BASE + 0xc0)
#define PCIE_RC_CONFIG_L1_SUBSTATE_CTRL2 (PCIE_RC_CONFIG_BASE + 0x90c)
#define PCIE_RC_CONFIG_THP_CAP (PCIE_RC_CONFIG_BASE + 0x274)
#define PCIE_RC_CONFIG_THP_CAP_NEXT_MASK GENMASK(31, 20)
+#define MAX_AXI_IB_ROOTPORT_REGION_NUM 3
+#define MIN_AXI_ADDR_BITS_PASSED 8
+#define PCIE_ADDR_MASK GENMASK_ULL(63, MIN_AXI_ADDR_BITS_PASSED)
#define PCIE_CORE_AXI_CONF_BASE 0xc00000
#define PCIE_CORE_OB_REGION_ADDR0 (PCIE_CORE_AXI_CONF_BASE + 0x0)
#define PCIE_CORE_OB_REGION_ADDR0_NUM_BITS 0x3f
-#define PCIE_CORE_OB_REGION_ADDR0_LO_ADDR 0xffffff00
+#define PCIE_CORE_OB_REGION_ADDR0_LO_ADDR PCIE_ADDR_MASK
#define PCIE_CORE_OB_REGION_ADDR1 (PCIE_CORE_AXI_CONF_BASE + 0x4)
#define PCIE_CORE_OB_REGION_DESC0 (PCIE_CORE_AXI_CONF_BASE + 0x8)
#define PCIE_CORE_OB_REGION_DESC1 (PCIE_CORE_AXI_CONF_BASE + 0xc)
@@ -159,7 +175,7 @@
#define PCIE_CORE_AXI_INBOUND_BASE 0xc00800
#define PCIE_RP_IB_ADDR0 (PCIE_CORE_AXI_INBOUND_BASE + 0x0)
#define PCIE_CORE_IB_REGION_ADDR0_NUM_BITS 0x3f
-#define PCIE_CORE_IB_REGION_ADDR0_LO_ADDR 0xffffff00
+#define PCIE_CORE_IB_REGION_ADDR0_LO_ADDR PCIE_ADDR_MASK
#define PCIE_RP_IB_ADDR1 (PCIE_CORE_AXI_INBOUND_BASE + 0x4)
/* Size of one AXI Region (not Region 0) */
@@ -174,12 +190,11 @@
#define AXI_WRAPPER_TYPE1_CFG 0xb
#define AXI_WRAPPER_NOR_MSG 0xc
-#define MAX_AXI_IB_ROOTPORT_REGION_NUM 3
-#define MIN_AXI_ADDR_BITS_PASSED 8
#define PCIE_RC_SEND_PME_OFF 0x11960
-#define ROCKCHIP_VENDOR_ID 0x1d87
#define PCIE_LINK_IS_L2(x) \
(((x) & PCIE_CLIENT_DEBUG_LTSSM_MASK) == PCIE_CLIENT_DEBUG_LTSSM_L2)
+#define PCIE_LINK_TRAINING_DONE(x) \
+ (((x) & PCIE_CORE_PL_CONF_LS_MASK) == PCIE_CORE_PL_CONF_LS_READY)
#define PCIE_LINK_UP(x) \
(((x) & PCIE_CLIENT_LINK_STATUS_MASK) == PCIE_CLIENT_LINK_STATUS_UP)
#define PCIE_LINK_IS_GEN2(x) \
@@ -191,20 +206,6 @@
#define RC_REGION_0_TYPE_MASK GENMASK(3, 0)
#define MAX_AXI_WRAPPER_REGION_NUM 33
-#define ROCKCHIP_PCIE_MSG_ROUTING_TO_RC 0x0
-#define ROCKCHIP_PCIE_MSG_ROUTING_VIA_ADDR 0x1
-#define ROCKCHIP_PCIE_MSG_ROUTING_VIA_ID 0x2
-#define ROCKCHIP_PCIE_MSG_ROUTING_BROADCAST 0x3
-#define ROCKCHIP_PCIE_MSG_ROUTING_LOCAL_INTX 0x4
-#define ROCKCHIP_PCIE_MSG_ROUTING_PME_ACK 0x5
-#define ROCKCHIP_PCIE_MSG_CODE_ASSERT_INTA 0x20
-#define ROCKCHIP_PCIE_MSG_CODE_ASSERT_INTB 0x21
-#define ROCKCHIP_PCIE_MSG_CODE_ASSERT_INTC 0x22
-#define ROCKCHIP_PCIE_MSG_CODE_ASSERT_INTD 0x23
-#define ROCKCHIP_PCIE_MSG_CODE_DEASSERT_INTA 0x24
-#define ROCKCHIP_PCIE_MSG_CODE_DEASSERT_INTB 0x25
-#define ROCKCHIP_PCIE_MSG_CODE_DEASSERT_INTC 0x26
-#define ROCKCHIP_PCIE_MSG_CODE_DEASSERT_INTD 0x27
#define ROCKCHIP_PCIE_MSG_ROUTING_MASK GENMASK(7, 5)
#define ROCKCHIP_PCIE_MSG_ROUTING(route) \
(((route) << 5) & ROCKCHIP_PCIE_MSG_ROUTING_MASK)
@@ -216,21 +217,38 @@
#define ROCKCHIP_PCIE_EP_CMD_STATUS 0x4
#define ROCKCHIP_PCIE_EP_CMD_STATUS_IS BIT(19)
#define ROCKCHIP_PCIE_EP_MSI_CTRL_REG 0x90
+#define ROCKCHIP_PCIE_EP_MSI_CP1_OFFSET 8
+#define ROCKCHIP_PCIE_EP_MSI_CP1_MASK GENMASK(15, 8)
+#define ROCKCHIP_PCIE_EP_MSI_FLAGS_OFFSET 16
#define ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_OFFSET 17
#define ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_MASK GENMASK(19, 17)
#define ROCKCHIP_PCIE_EP_MSI_CTRL_MME_OFFSET 20
#define ROCKCHIP_PCIE_EP_MSI_CTRL_MME_MASK GENMASK(22, 20)
#define ROCKCHIP_PCIE_EP_MSI_CTRL_ME BIT(16)
#define ROCKCHIP_PCIE_EP_MSI_CTRL_MASK_MSI_CAP BIT(24)
+#define ROCKCHIP_PCIE_EP_MSIX_CAP_REG 0xb0
+#define ROCKCHIP_PCIE_EP_MSIX_CAP_CP_OFFSET 8
+#define ROCKCHIP_PCIE_EP_MSIX_CAP_CP_MASK GENMASK(15, 8)
#define ROCKCHIP_PCIE_EP_DUMMY_IRQ_ADDR 0x1
#define ROCKCHIP_PCIE_EP_PCI_LEGACY_IRQ_ADDR 0x3
-#define ROCKCHIP_PCIE_EP_FUNC_BASE(fn) (((fn) << 12) & GENMASK(19, 12))
+
+#define ROCKCHIP_PCIE_AT_MIN_NUM_BITS 8
+#define ROCKCHIP_PCIE_AT_MAX_NUM_BITS 20
+#define ROCKCHIP_PCIE_AT_SIZE_ALIGN (1UL << ROCKCHIP_PCIE_AT_MIN_NUM_BITS)
+
+#define ROCKCHIP_PCIE_EP_FUNC_BASE(fn) \
+ (PCIE_EP_PF_CONFIG_REGS_BASE + (((fn) << 12) & GENMASK(19, 12)))
+#define ROCKCHIP_PCIE_EP_VIRT_FUNC_BASE(fn) \
+ (PCIE_EP_PF_CONFIG_REGS_BASE + 0x10000 + (((fn) << 12) & GENMASK(19, 12)))
+
#define ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar) \
- (PCIE_RC_RP_ATS_BASE + 0x0840 + (fn) * 0x0040 + (bar) * 0x0008)
+ (PCIE_CORE_AXI_CONF_BASE + 0x0828 + (fn) * 0x0040 + (bar) * 0x0008)
#define ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar) \
- (PCIE_RC_RP_ATS_BASE + 0x0844 + (fn) * 0x0040 + (bar) * 0x0008)
-#define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0(r) \
- (PCIE_RC_RP_ATS_BASE + 0x0000 + ((r) & 0x1f) * 0x0020)
+ (PCIE_CORE_AXI_CONF_BASE + 0x082c + (fn) * 0x0040 + (bar) * 0x0008)
#define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK GENMASK(19, 12)
#define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) \
(((devfn) << 12) & \
@@ -238,20 +256,21 @@
#define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK GENMASK(27, 20)
#define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_BUS(bus) \
(((bus) << 20) & ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK)
+#define PCIE_RC_EP_ATR_OB_REGIONS_1_32 (PCIE_CORE_AXI_CONF_BASE + 0x0020)
+#define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0(r) \
+ (PCIE_RC_EP_ATR_OB_REGIONS_1_32 + 0x0000 + ((r) & 0x1f) * 0x0020)
#define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR1(r) \
- (PCIE_RC_RP_ATS_BASE + 0x0004 + ((r) & 0x1f) * 0x0020)
+ (PCIE_RC_EP_ATR_OB_REGIONS_1_32 + 0x0004 + ((r) & 0x1f) * 0x0020)
#define ROCKCHIP_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID BIT(23)
#define ROCKCHIP_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK GENMASK(31, 24)
#define ROCKCHIP_PCIE_AT_OB_REGION_DESC0_DEVFN(devfn) \
(((devfn) << 24) & ROCKCHIP_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK)
#define ROCKCHIP_PCIE_AT_OB_REGION_DESC0(r) \
- (PCIE_RC_RP_ATS_BASE + 0x0008 + ((r) & 0x1f) * 0x0020)
-#define ROCKCHIP_PCIE_AT_OB_REGION_DESC1(r) \
- (PCIE_RC_RP_ATS_BASE + 0x000c + ((r) & 0x1f) * 0x0020)
-#define ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR0(r) \
- (PCIE_RC_RP_ATS_BASE + 0x0018 + ((r) & 0x1f) * 0x0020)
-#define ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR1(r) \
- (PCIE_RC_RP_ATS_BASE + 0x001c + ((r) & 0x1f) * 0x0020)
+ (PCIE_RC_EP_ATR_OB_REGIONS_1_32 + 0x0008 + ((r) & 0x1f) * 0x0020)
+#define ROCKCHIP_PCIE_AT_OB_REGION_DESC1(r) \
+ (PCIE_RC_EP_ATR_OB_REGIONS_1_32 + 0x000c + ((r) & 0x1f) * 0x0020)
+#define ROCKCHIP_PCIE_AT_OB_REGION_DESC2(r) \
+ (PCIE_RC_EP_ATR_OB_REGIONS_1_32 + 0x0010 + ((r) & 0x1f) * 0x0020)
#define ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG0(fn) \
(PCIE_CORE_CTRL_MGMT_BASE + 0x0240 + (fn) * 0x0008)
@@ -268,27 +287,37 @@
(((c) << ((b) * 8 + 5)) & \
ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b))
+#define ROCKCHIP_NUM_PM_RSTS ARRAY_SIZE(rockchip_pci_pm_rsts)
+#define ROCKCHIP_NUM_CORE_RSTS ARRAY_SIZE(rockchip_pci_core_rsts)
+
+static const char * const rockchip_pci_pm_rsts[] = {
+ "pm",
+ "pclk",
+ "aclk",
+};
+
+/* NOTE: Do not reorder the deassert sequence of the following reset pins */
+static const char * const rockchip_pci_core_rsts[] = {
+ "pipe",
+ "mgmt",
+ "core",
+ "mgmt-sticky",
+};
+
struct rockchip_pcie {
void __iomem *reg_base; /* DT axi-base */
void __iomem *apb_base; /* DT apb-base */
bool legacy_phy;
struct phy *phys[MAX_LANE_NUM];
- struct reset_control *core_rst;
- struct reset_control *mgmt_rst;
- struct reset_control *mgmt_sticky_rst;
- struct reset_control *pipe_rst;
- struct reset_control *pm_rst;
- struct reset_control *aclk_rst;
- struct reset_control *pclk_rst;
- struct clk *aclk_pcie;
- struct clk *aclk_perf_pcie;
- struct clk *hclk_pcie;
- struct clk *clk_pcie_pm;
+ struct reset_control_bulk_data pm_rsts[ROCKCHIP_NUM_PM_RSTS];
+ struct reset_control_bulk_data core_rsts[ROCKCHIP_NUM_CORE_RSTS];
+ struct clk_bulk_data *clks;
+ int num_clks;
struct regulator *vpcie12v; /* 12V power supply */
struct regulator *vpcie3v3; /* 3.3V power supply */
struct regulator *vpcie1v8; /* 1.8V power supply */
struct regulator *vpcie0v9; /* 0.9V power supply */
- struct gpio_desc *ep_gpio;
+ struct gpio_desc *perst_gpio;
u32 lanes;
u8 lanes_map;
int link_gen;
@@ -317,7 +346,7 @@ int rockchip_pcie_init_port(struct rockchip_pcie *rockchip);
int rockchip_pcie_get_phys(struct rockchip_pcie *rockchip);
void rockchip_pcie_deinit_phys(struct rockchip_pcie *rockchip);
int rockchip_pcie_enable_clocks(struct rockchip_pcie *rockchip);
-void rockchip_pcie_disable_clocks(void *data);
+void rockchip_pcie_disable_clocks(struct rockchip_pcie *rockchip);
void rockchip_pcie_cfg_configuration_accesses(
struct rockchip_pcie *rockchip, u32 type);
diff --git a/drivers/pci/controller/pcie-rzg3s-host.c b/drivers/pci/controller/pcie-rzg3s-host.c
new file mode 100644
index 000000000000..667e6d629474
--- /dev/null
+++ b/drivers/pci/controller/pcie-rzg3s-host.c
@@ -0,0 +1,1761 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe driver for Renesas RZ/G3S SoCs
+ *
+ * Copyright (C) 2025 Renesas Electronics Corp.
+ *
+ * Based on:
+ * drivers/pci/controller/pcie-rcar-host.c
+ * Copyright (C) 2009 - 2011 Paul Mundt
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+#include <linux/cleanup.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/iopoll.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mutex.h>
+#include <linux/msi.h>
+#include <linux/of_irq.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/units.h>
+
+#include "../pci.h"
+
+/* AXI registers */
+#define RZG3S_PCI_REQDATA(id) (0x80 + (id) * 0x4)
+#define RZG3S_PCI_REQRCVDAT 0x8c
+
+#define RZG3S_PCI_REQADR1 0x90
+#define RZG3S_PCI_REQADR1_BUS GENMASK(31, 24)
+#define RZG3S_PCI_REQADR1_DEV GENMASK(23, 19)
+#define RZG3S_PCI_REQADR1_FUNC GENMASK(18, 16)
+#define RZG3S_PCI_REQADR1_REG GENMASK(11, 0)
+
+#define RZG3S_PCI_REQBE 0x98
+#define RZG3S_PCI_REQBE_BYTE_EN GENMASK(3, 0)
+
+#define RZG3S_PCI_REQISS 0x9c
+#define RZG3S_PCI_REQISS_MOR_STATUS GENMASK(18, 16)
+#define RZG3S_PCI_REQISS_TR_TYPE GENMASK(11, 8)
+#define RZG3S_PCI_REQISS_TR_TP0_RD FIELD_PREP(RZG3S_PCI_REQISS_TR_TYPE, 0x4)
+#define RZG3S_PCI_REQISS_TR_TP0_WR FIELD_PREP(RZG3S_PCI_REQISS_TR_TYPE, 0x5)
+#define RZG3S_PCI_REQISS_TR_TP1_RD FIELD_PREP(RZG3S_PCI_REQISS_TR_TYPE, 0x6)
+#define RZG3S_PCI_REQISS_TR_TP1_WR FIELD_PREP(RZG3S_PCI_REQISS_TR_TYPE, 0x7)
+#define RZG3S_PCI_REQISS_REQ_ISSUE BIT(0)
+
+#define RZG3S_PCI_MSIRCVWADRL 0x100
+#define RZG3S_PCI_MSIRCVWADRL_MASK GENMASK(31, 3)
+#define RZG3S_PCI_MSIRCVWADRL_MSG_DATA_ENA BIT(1)
+#define RZG3S_PCI_MSIRCVWADRL_ENA BIT(0)
+
+#define RZG3S_PCI_MSIRCVWADRU 0x104
+
+#define RZG3S_PCI_MSIRCVWMSKL 0x108
+#define RZG3S_PCI_MSIRCVWMSKL_MASK GENMASK(31, 2)
+
+#define RZG3S_PCI_PINTRCVIE 0x110
+#define RZG3S_PCI_PINTRCVIE_INTX(i) BIT(i)
+#define RZG3S_PCI_PINTRCVIE_MSI BIT(4)
+
+#define RZG3S_PCI_PINTRCVIS 0x114
+#define RZG3S_PCI_PINTRCVIS_INTX(i) BIT(i)
+#define RZG3S_PCI_PINTRCVIS_MSI BIT(4)
+
+#define RZG3S_PCI_MSGRCVIE 0x120
+#define RZG3S_PCI_MSGRCVIE_MSG_RCV BIT(24)
+
+#define RZG3S_PCI_MSGRCVIS 0x124
+#define RZG3S_PCI_MSGRCVIS_MRI BIT(24)
+
+#define RZG3S_PCI_PEIE0 0x200
+
+#define RZG3S_PCI_PEIS0 0x204
+#define RZG3S_PCI_PEIS0_RX_DLLP_PM_ENTER BIT(12)
+#define RZG3S_PCI_PEIS0_DL_UPDOWN BIT(9)
+
+#define RZG3S_PCI_PEIE1 0x208
+#define RZG3S_PCI_PEIS1 0x20c
+#define RZG3S_PCI_AMEIS 0x214
+#define RZG3S_PCI_ASEIS1 0x224
+
+#define RZG3S_PCI_PCSTAT1 0x408
+#define RZG3S_PCI_PCSTAT1_LTSSM_STATE GENMASK(14, 10)
+#define RZG3S_PCI_PCSTAT1_DL_DOWN_STS BIT(0)
+
+#define RZG3S_PCI_PCCTRL2 0x410
+#define RZG3S_PCI_PCCTRL2_LS_CHG GENMASK(9, 8)
+#define RZG3S_PCI_PCCTRL2_LS_CHG_REQ BIT(0)
+
+#define RZG3S_PCI_PCSTAT2 0x414
+#define RZG3S_PCI_PCSTAT2_LS_CHG_DONE BIT(28)
+#define RZG3S_PCI_PCSTAT2_SDRIRE GENMASK(7, 1)
+
+#define RZG3S_PCI_PERM 0x300
+#define RZG3S_PCI_PERM_CFG_HWINIT_EN BIT(2)
+#define RZG3S_PCI_PERM_PIPE_PHY_REG_EN BIT(1)
+
+#define RZG3S_PCI_MSIRE(id) (0x600 + (id) * 0x10)
+#define RZG3S_PCI_MSIRE_ENA BIT(0)
+
+#define RZG3S_PCI_MSIRM(id) (0x608 + (id) * 0x10)
+#define RZG3S_PCI_MSIRS(id) (0x60c + (id) * 0x10)
+
+#define RZG3S_PCI_AWBASEL(id) (0x1000 + (id) * 0x20)
+#define RZG3S_PCI_AWBASEL_WIN_ENA BIT(0)
+
+#define RZG3S_PCI_AWBASEU(id) (0x1004 + (id) * 0x20)
+#define RZG3S_PCI_AWMASKL(id) (0x1008 + (id) * 0x20)
+#define RZG3S_PCI_AWMASKU(id) (0x100c + (id) * 0x20)
+#define RZG3S_PCI_ADESTL(id) (0x1010 + (id) * 0x20)
+#define RZG3S_PCI_ADESTU(id) (0x1014 + (id) * 0x20)
+
+#define RZG3S_PCI_PWBASEL(id) (0x1100 + (id) * 0x20)
+#define RZG3S_PCI_PWBASEL_ENA BIT(0)
+
+#define RZG3S_PCI_PWBASEU(id) (0x1104 + (id) * 0x20)
+#define RZG3S_PCI_PDESTL(id) (0x1110 + (id) * 0x20)
+#define RZG3S_PCI_PDESTU(id) (0x1114 + (id) * 0x20)
+#define RZG3S_PCI_PWMASKL(id) (0x1108 + (id) * 0x20)
+#define RZG3S_PCI_PWMASKU(id) (0x110c + (id) * 0x20)
+
+/* PHY control registers */
+#define RZG3S_PCI_PHY_XCFGD(id) (0x2000 + (id) * 0x10)
+#define RZG3S_PCI_PHY_XCFGD_NUM 39
+
+#define RZG3S_PCI_PHY_XCFGA_CMN(id) (0x2400 + (id) * 0x10)
+#define RZG3S_PCI_PHY_XCFGA_CMN_NUM 16
+
+#define RZG3S_PCI_PHY_XCFGA_RX(id) (0x2500 + (id) * 0x10)
+#define RZG3S_PCI_PHY_XCFGA_RX_NUM 13
+
+#define RZG3S_PCI_PHY_XCFGA_TX 0x25d0
+
+#define RZG3S_PCI_PHY_XCFG_CTRL 0x2a20
+#define RZG3S_PCI_PHY_XCFG_CTRL_PHYREG_SEL BIT(0)
+
+/* PCIe registers */
+#define RZG3S_PCI_CFG_BASE 0x6000
+#define RZG3S_PCI_CFG_BARMSK00L 0xa0
+#define RZG3S_PCI_CFG_BARMSK00U 0xa4
+
+#define RZG3S_PCI_CFG_PCIEC 0x60
+
+/* System controller registers */
+#define RZG3S_SYS_PCIE_RST_RSM_B 0xd74
+#define RZG3S_SYS_PCIE_RST_RSM_B_MASK BIT(0)
+
+/* Maximum number of windows */
+#define RZG3S_MAX_WINDOWS 8
+
+/* Number of MSI interrupts per register */
+#define RZG3S_PCI_MSI_INT_PER_REG 32
+/* The number of MSI interrupts */
+#define RZG3S_PCI_MSI_INT_NR RZG3S_PCI_MSI_INT_PER_REG
+
+/* Timeouts experimentally determined */
+#define RZG3S_REQ_ISSUE_TIMEOUT_US 2500
+
+/**
+ * struct rzg3s_pcie_msi - RZ/G3S PCIe MSI data structure
+ * @domain: IRQ domain
+ * @map: bitmap with the allocated MSIs
+ * @dma_addr: address of the allocated MSI window
+ * @window_base: base address of the MSI window
+ * @pages: allocated pages for MSI window mapping
+ * @map_lock: lock for bitmap with the allocated MSIs
+ * @irq: MSI interrupt
+ */
+struct rzg3s_pcie_msi {
+ struct irq_domain *domain;
+ DECLARE_BITMAP(map, RZG3S_PCI_MSI_INT_NR);
+ dma_addr_t dma_addr;
+ dma_addr_t window_base;
+ unsigned long pages;
+ struct mutex map_lock;
+ int irq;
+};
+
+struct rzg3s_pcie_host;
+
+/**
+ * struct rzg3s_pcie_soc_data - SoC specific data
+ * @init_phy: PHY initialization function
+ * @power_resets: array with the resets that need to be de-asserted after
+ * power-on
+ * @cfg_resets: array with the resets that need to be de-asserted after
+ * configuration
+ * @num_power_resets: number of power resets
+ * @num_cfg_resets: number of configuration resets
+ */
+struct rzg3s_pcie_soc_data {
+ int (*init_phy)(struct rzg3s_pcie_host *host);
+ const char * const *power_resets;
+ const char * const *cfg_resets;
+ u8 num_power_resets;
+ u8 num_cfg_resets;
+};
+
+/**
+ * struct rzg3s_pcie_port - RZ/G3S PCIe Root Port data structure
+ * @refclk: PCIe reference clock
+ * @vendor_id: Vendor ID
+ * @device_id: Device ID
+ */
+struct rzg3s_pcie_port {
+ struct clk *refclk;
+ u32 vendor_id;
+ u32 device_id;
+};
+
+/**
+ * struct rzg3s_pcie_host - RZ/G3S PCIe data structure
+ * @axi: base address for AXI registers
+ * @pcie: base address for PCIe registers
+ * @dev: struct device
+ * @power_resets: reset control signals that should be de-asserted after power up
+ * @cfg_resets: reset control signals that should be de-asserted after configuration
+ * @sysc: SYSC regmap
+ * @intx_domain: INTx IRQ domain
+ * @data: SoC specific data
+ * @msi: MSI data structure
+ * @port: PCIe Root Port
+ * @hw_lock: lock for access to the HW resources
+ * @intx_irqs: INTx interrupts
+ * @max_link_speed: maximum supported link speed
+ */
+struct rzg3s_pcie_host {
+ void __iomem *axi;
+ void __iomem *pcie;
+ struct device *dev;
+ struct reset_control_bulk_data *power_resets;
+ struct reset_control_bulk_data *cfg_resets;
+ struct regmap *sysc;
+ struct irq_domain *intx_domain;
+ const struct rzg3s_pcie_soc_data *data;
+ struct rzg3s_pcie_msi msi;
+ struct rzg3s_pcie_port port;
+ raw_spinlock_t hw_lock;
+ int intx_irqs[PCI_NUM_INTX];
+ int max_link_speed;
+};
+
+#define rzg3s_msi_to_host(_msi) container_of(_msi, struct rzg3s_pcie_host, msi)
+
+static void rzg3s_pcie_update_bits(void __iomem *base, u32 offset, u32 mask,
+ u32 val)
+{
+ u32 tmp;
+
+ tmp = readl_relaxed(base + offset);
+ tmp &= ~mask;
+ tmp |= val & mask;
+ writel_relaxed(tmp, base + offset);
+}
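
Note that rzg3s_pcie_update_bits() is an unlocked read-modify-write, so racing callers must serialize externally; that is what host->hw_lock (taken via guard() in the irq_chip callbacks below) provides. Typical use, as seen later in this file:

	/* Set only REQ_ISSUE, leaving the other REQISS fields alone */
	rzg3s_pcie_update_bits(host->axi, RZG3S_PCI_REQISS,
			       RZG3S_PCI_REQISS_REQ_ISSUE,
			       RZG3S_PCI_REQISS_REQ_ISSUE);
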
+
+static int rzg3s_pcie_child_issue_request(struct rzg3s_pcie_host *host)
+{
+ u32 val;
+ int ret;
+
+ rzg3s_pcie_update_bits(host->axi, RZG3S_PCI_REQISS,
+ RZG3S_PCI_REQISS_REQ_ISSUE,
+ RZG3S_PCI_REQISS_REQ_ISSUE);
+ ret = readl_poll_timeout_atomic(host->axi + RZG3S_PCI_REQISS, val,
+ !(val & RZG3S_PCI_REQISS_REQ_ISSUE),
+ 5, RZG3S_REQ_ISSUE_TIMEOUT_US);
+
+ if (val & RZG3S_PCI_REQISS_MOR_STATUS)
+ return -EIO;
+
+ return ret;
+}
+
+static void rzg3s_pcie_child_prepare_bus(struct pci_bus *bus,
+ unsigned int devfn, int where)
+{
+ struct rzg3s_pcie_host *host = bus->sysdata;
+ unsigned int dev, func, reg;
+
+ dev = PCI_SLOT(devfn);
+ func = PCI_FUNC(devfn);
+ reg = where & ~0x3;
+
+ /* Set the destination */
+ writel_relaxed(FIELD_PREP(RZG3S_PCI_REQADR1_BUS, bus->number) |
+ FIELD_PREP(RZG3S_PCI_REQADR1_DEV, dev) |
+ FIELD_PREP(RZG3S_PCI_REQADR1_FUNC, func) |
+ FIELD_PREP(RZG3S_PCI_REQADR1_REG, reg),
+ host->axi + RZG3S_PCI_REQADR1);
+
+ /* Set byte enable */
+ writel_relaxed(RZG3S_PCI_REQBE_BYTE_EN, host->axi + RZG3S_PCI_REQBE);
+}
+
+static int rzg3s_pcie_child_read_conf(struct rzg3s_pcie_host *host,
+ struct pci_bus *bus, unsigned int devfn,
+ int where, u32 *data)
+{
+	bool type0 = pci_is_root_bus(bus->parent);
+ int ret;
+
+ rzg3s_pcie_child_prepare_bus(bus, devfn, where);
+
+ /* Set the type of request */
+ writel_relaxed(type0 ? RZG3S_PCI_REQISS_TR_TP0_RD :
+ RZG3S_PCI_REQISS_TR_TP1_RD,
+ host->axi + RZG3S_PCI_REQISS);
+
+ /* Issue the request and wait to finish */
+ ret = rzg3s_pcie_child_issue_request(host);
+ if (ret)
+ return PCIBIOS_SET_FAILED;
+
+ /* Read the data */
+ *data = readl_relaxed(host->axi + RZG3S_PCI_REQRCVDAT);
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+/* Serialization is provided by 'pci_lock' in drivers/pci/access.c */
+static int rzg3s_pcie_child_read(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ struct rzg3s_pcie_host *host = bus->sysdata;
+ int ret;
+
+ ret = rzg3s_pcie_child_read_conf(host, bus, devfn, where, val);
+ if (ret != PCIBIOS_SUCCESSFUL)
+ return ret;
+
+ if (size <= 2)
+ *val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1);
+
+ return PCIBIOS_SUCCESSFUL;
+}
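
Because the request unit transfers only aligned 32-bit dwords, sub-word reads are narrowed in software by the shift/mask above. A worked instance:

/*
 * 16-bit read at where = 0x06 (PCI_STATUS): the dword at offset 0x04
 * is fetched, then the upper half is extracted:
 *
 *   *val = (*val >> (8 * (0x06 & 3))) & ((1 << (2 * 8)) - 1)
 *        = (dword >> 16) & 0xffff
 */
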
+
+static int rzg3s_pcie_child_write_conf(struct rzg3s_pcie_host *host,
+ struct pci_bus *bus, unsigned int devfn,
+ int where, u32 data)
+{
+	bool type0 = pci_is_root_bus(bus->parent);
+ int ret;
+
+ rzg3s_pcie_child_prepare_bus(bus, devfn, where);
+
+ /* Set the write data */
+ writel_relaxed(0, host->axi + RZG3S_PCI_REQDATA(0));
+ writel_relaxed(0, host->axi + RZG3S_PCI_REQDATA(1));
+ writel_relaxed(data, host->axi + RZG3S_PCI_REQDATA(2));
+
+ /* Set the type of request */
+ writel_relaxed(type0 ? RZG3S_PCI_REQISS_TR_TP0_WR :
+ RZG3S_PCI_REQISS_TR_TP1_WR,
+ host->axi + RZG3S_PCI_REQISS);
+
+ /* Issue the request and wait to finish */
+ ret = rzg3s_pcie_child_issue_request(host);
+ if (ret)
+ return PCIBIOS_SET_FAILED;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+/* Serialization is provided by 'pci_lock' in drivers/pci/access.c */
+static int rzg3s_pcie_child_write(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ struct rzg3s_pcie_host *host = bus->sysdata;
+ u32 data, shift;
+ int ret;
+
+ if (size == 4)
+ return rzg3s_pcie_child_write_conf(host, bus, devfn, where, val);
+
+ /*
+	 * The controller only does 32-bit accesses. To do narrower accesses,
+	 * software needs to do read/modify/write. This may have side effects. For
+ * example, software may perform a 16-bit write. If the hardware only
+ * supports 32-bit accesses, we must do a 32-bit read, merge in the 16
+ * bits we intend to write, followed by a 32-bit write. If the 16 bits
+ * we *don't* intend to write happen to have any RW1C
+ * (write-one-to-clear) bits set, we just inadvertently cleared
+ * something we shouldn't have.
+ */
+ if (!bus->unsafe_warn) {
+ dev_warn(&bus->dev, "%d-byte config write to %04x:%02x:%02x.%d offset %#x may corrupt adjacent RW1C bits\n",
+ size, pci_domain_nr(bus), bus->number,
+ PCI_SLOT(devfn), PCI_FUNC(devfn), where);
+ bus->unsafe_warn = 1;
+ }
+
+ ret = rzg3s_pcie_child_read_conf(host, bus, devfn, where, &data);
+ if (ret != PCIBIOS_SUCCESSFUL)
+ return ret;
+
+ if (size == 1) {
+ shift = BITS_PER_BYTE * (where & 3);
+ data &= ~(0xff << shift);
+ data |= ((val & 0xff) << shift);
+ } else if (size == 2) {
+ shift = BITS_PER_BYTE * (where & 2);
+ data &= ~(0xffff << shift);
+ data |= ((val & 0xffff) << shift);
+ } else {
+ data = val;
+ }
+
+ return rzg3s_pcie_child_write_conf(host, bus, devfn, where, data);
+}
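
A concrete instance of the RW1C hazard the warning above describes, using standard config-space offsets:

/*
 * A 2-byte write to PCI_COMMAND (offset 0x04) becomes: read the dword
 * at 0x04 (which also holds PCI_STATUS at 0x06), merge in the new
 * command value, write the dword back.  Any Status error bits that
 * happened to be set (e.g. PCI_STATUS_DETECTED_PARITY) get 1s written
 * back, and since those bits are RW1C they are silently cleared.
 */
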
+
+static struct pci_ops rzg3s_pcie_child_ops = {
+ .read = rzg3s_pcie_child_read,
+ .write = rzg3s_pcie_child_write,
+};
+
+static void __iomem *rzg3s_pcie_root_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int where)
+{
+ struct rzg3s_pcie_host *host = bus->sysdata;
+
+ if (devfn)
+ return NULL;
+
+ return host->pcie + where;
+}
+
+/* Serialized by 'pci_lock' */
+static int rzg3s_pcie_root_write(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ struct rzg3s_pcie_host *host = bus->sysdata;
+ int ret;
+
+ /* Enable access control to the CFGU */
+ writel_relaxed(RZG3S_PCI_PERM_CFG_HWINIT_EN,
+ host->axi + RZG3S_PCI_PERM);
+
+ ret = pci_generic_config_write(bus, devfn, where, size, val);
+
+ /* Disable access control to the CFGU */
+ writel_relaxed(0, host->axi + RZG3S_PCI_PERM);
+
+ return ret;
+}
+
+static struct pci_ops rzg3s_pcie_root_ops = {
+ .read = pci_generic_config_read,
+ .write = rzg3s_pcie_root_write,
+ .map_bus = rzg3s_pcie_root_map_bus,
+};
+
+static void rzg3s_pcie_intx_irq_handler(struct irq_desc *desc)
+{
+ struct rzg3s_pcie_host *host = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ unsigned int irq = irq_desc_get_irq(desc);
+ u32 intx = irq - host->intx_irqs[0];
+
+ chained_irq_enter(chip, desc);
+ generic_handle_domain_irq(host->intx_domain, intx);
+ chained_irq_exit(chip, desc);
+}
+
+static irqreturn_t rzg3s_pcie_msi_irq(int irq, void *data)
+{
+ u8 regs = RZG3S_PCI_MSI_INT_NR / RZG3S_PCI_MSI_INT_PER_REG;
+ DECLARE_BITMAP(bitmap, RZG3S_PCI_MSI_INT_NR);
+ struct rzg3s_pcie_host *host = data;
+ struct rzg3s_pcie_msi *msi = &host->msi;
+ unsigned long bit;
+ u32 status;
+
+ status = readl_relaxed(host->axi + RZG3S_PCI_PINTRCVIS);
+ if (!(status & RZG3S_PCI_PINTRCVIS_MSI))
+ return IRQ_NONE;
+
+ /* Clear the MSI */
+ rzg3s_pcie_update_bits(host->axi, RZG3S_PCI_PINTRCVIS,
+ RZG3S_PCI_PINTRCVIS_MSI,
+ RZG3S_PCI_PINTRCVIS_MSI);
+ rzg3s_pcie_update_bits(host->axi, RZG3S_PCI_MSGRCVIS,
+ RZG3S_PCI_MSGRCVIS_MRI, RZG3S_PCI_MSGRCVIS_MRI);
+
+ for (u8 reg_id = 0; reg_id < regs; reg_id++) {
+ status = readl_relaxed(host->axi + RZG3S_PCI_MSIRS(reg_id));
+ bitmap_write(bitmap, status, reg_id * RZG3S_PCI_MSI_INT_PER_REG,
+ RZG3S_PCI_MSI_INT_PER_REG);
+ }
+
+ for_each_set_bit(bit, bitmap, RZG3S_PCI_MSI_INT_NR) {
+ int ret;
+
+ ret = generic_handle_domain_irq(msi->domain, bit);
+ if (ret) {
+ u8 reg_bit = bit % RZG3S_PCI_MSI_INT_PER_REG;
+ u8 reg_id = bit / RZG3S_PCI_MSI_INT_PER_REG;
+
+ /* Unknown MSI, just clear it */
+ writel_relaxed(BIT(reg_bit),
+ host->axi + RZG3S_PCI_MSIRS(reg_id));
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void rzg3s_pcie_msi_irq_ack(struct irq_data *d)
+{
+ struct rzg3s_pcie_msi *msi = irq_data_get_irq_chip_data(d);
+ struct rzg3s_pcie_host *host = rzg3s_msi_to_host(msi);
+ u8 reg_bit = d->hwirq % RZG3S_PCI_MSI_INT_PER_REG;
+ u8 reg_id = d->hwirq / RZG3S_PCI_MSI_INT_PER_REG;
+
+ guard(raw_spinlock_irqsave)(&host->hw_lock);
+
+ writel_relaxed(BIT(reg_bit), host->axi + RZG3S_PCI_MSIRS(reg_id));
+}
+
+static void rzg3s_pcie_msi_irq_mask(struct irq_data *d)
+{
+ struct rzg3s_pcie_msi *msi = irq_data_get_irq_chip_data(d);
+ struct rzg3s_pcie_host *host = rzg3s_msi_to_host(msi);
+ u8 reg_bit = d->hwirq % RZG3S_PCI_MSI_INT_PER_REG;
+ u8 reg_id = d->hwirq / RZG3S_PCI_MSI_INT_PER_REG;
+
+ guard(raw_spinlock_irqsave)(&host->hw_lock);
+
+ rzg3s_pcie_update_bits(host->axi, RZG3S_PCI_MSIRM(reg_id), BIT(reg_bit),
+ BIT(reg_bit));
+}
+
+static void rzg3s_pcie_msi_irq_unmask(struct irq_data *d)
+{
+ struct rzg3s_pcie_msi *msi = irq_data_get_irq_chip_data(d);
+ struct rzg3s_pcie_host *host = rzg3s_msi_to_host(msi);
+ u8 reg_bit = d->hwirq % RZG3S_PCI_MSI_INT_PER_REG;
+ u8 reg_id = d->hwirq / RZG3S_PCI_MSI_INT_PER_REG;
+
+ guard(raw_spinlock_irqsave)(&host->hw_lock);
+
+ rzg3s_pcie_update_bits(host->axi, RZG3S_PCI_MSIRM(reg_id), BIT(reg_bit),
+ 0);
+}
+
+static void rzg3s_pcie_irq_compose_msi_msg(struct irq_data *data,
+ struct msi_msg *msg)
+{
+ struct rzg3s_pcie_msi *msi = irq_data_get_irq_chip_data(data);
+ struct rzg3s_pcie_host *host = rzg3s_msi_to_host(msi);
+ u32 lo, hi;
+
+ /*
+	 * The enable and msg-data-enable bits live in the low address word.
+	 * Mask them off, along with the unused bit 2.
+ */
+ lo = readl_relaxed(host->axi + RZG3S_PCI_MSIRCVWADRL) &
+ RZG3S_PCI_MSIRCVWADRL_MASK;
+ hi = readl_relaxed(host->axi + RZG3S_PCI_MSIRCVWADRU);
+
+ msg->address_lo = lo;
+ msg->address_hi = hi;
+ msg->data = data->hwirq;
+}
+
+static struct irq_chip rzg3s_pcie_msi_bottom_chip = {
+ .name = "rzg3s-pcie-msi",
+ .irq_ack = rzg3s_pcie_msi_irq_ack,
+ .irq_mask = rzg3s_pcie_msi_irq_mask,
+ .irq_unmask = rzg3s_pcie_msi_irq_unmask,
+ .irq_compose_msi_msg = rzg3s_pcie_irq_compose_msi_msg,
+};
+
+static int rzg3s_pcie_msi_domain_alloc(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs,
+ void *args)
+{
+ struct rzg3s_pcie_msi *msi = domain->host_data;
+ int hwirq;
+
+ scoped_guard(mutex, &msi->map_lock) {
+ hwirq = bitmap_find_free_region(msi->map, RZG3S_PCI_MSI_INT_NR,
+ order_base_2(nr_irqs));
+ }
+
+ if (hwirq < 0)
+ return -ENOSPC;
+
+ for (unsigned int i = 0; i < nr_irqs; i++) {
+ irq_domain_set_info(domain, virq + i, hwirq + i,
+ &rzg3s_pcie_msi_bottom_chip,
+ domain->host_data, handle_edge_irq, NULL,
+ NULL);
+ }
+
+ return 0;
+}
+
+static void rzg3s_pcie_msi_domain_free(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
+{
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ struct rzg3s_pcie_msi *msi = domain->host_data;
+
+ guard(mutex)(&msi->map_lock);
+
+ bitmap_release_region(msi->map, d->hwirq, order_base_2(nr_irqs));
+}
+
+static const struct irq_domain_ops rzg3s_pcie_msi_domain_ops = {
+ .alloc = rzg3s_pcie_msi_domain_alloc,
+ .free = rzg3s_pcie_msi_domain_free,
+};
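
The allocator hands out naturally aligned power-of-two blocks, which is exactly what PCI multi-MSI requires. Worked through for a three-vector request:

/*
 * nr_irqs = 3:
 *   order_base_2(3) = 2, so a block of 2^2 = 4 hwirqs is requested;
 *   bitmap_find_free_region(map, 32, 2) returns the first free,
 *   4-aligned slot (or a negative value if none is left).
 * The unused fourth hwirq stays reserved until the region is released.
 */
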
+
+#define RZG3S_PCIE_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_NO_AFFINITY | \
+ MSI_FLAG_PCI_MSI_MASK_PARENT)
+
+#define RZG3S_PCIE_MSI_FLAGS_SUPPORTED (MSI_FLAG_MULTI_PCI_MSI | \
+ MSI_GENERIC_FLAGS_MASK)
+
+static const struct msi_parent_ops rzg3s_pcie_msi_parent_ops = {
+ .required_flags = RZG3S_PCIE_MSI_FLAGS_REQUIRED,
+ .supported_flags = RZG3S_PCIE_MSI_FLAGS_SUPPORTED,
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .chip_flags = MSI_CHIP_FLAG_SET_ACK,
+ .prefix = "RZG3S-",
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
+};
+
+static int rzg3s_pcie_msi_allocate_domains(struct rzg3s_pcie_msi *msi)
+{
+ struct rzg3s_pcie_host *host = rzg3s_msi_to_host(msi);
+ struct device *dev = host->dev;
+ struct irq_domain_info info = {
+ .fwnode = dev_fwnode(dev),
+ .ops = &rzg3s_pcie_msi_domain_ops,
+ .size = RZG3S_PCI_MSI_INT_NR,
+ .host_data = msi,
+ };
+
+ msi->domain = msi_create_parent_irq_domain(&info,
+ &rzg3s_pcie_msi_parent_ops);
+ if (!msi->domain)
+ return dev_err_probe(dev, -ENOMEM,
+ "failed to create IRQ domain\n");
+
+ return 0;
+}
+
+static int rzg3s_pcie_msi_hw_setup(struct rzg3s_pcie_host *host)
+{
+ u8 regs = RZG3S_PCI_MSI_INT_NR / RZG3S_PCI_MSI_INT_PER_REG;
+ struct rzg3s_pcie_msi *msi = &host->msi;
+
+ /*
+ * Set MSI window size. HW will set the window to
+ * RZG3S_PCI_MSI_INT_NR * 4 bytes.
+ */
+ writel_relaxed(FIELD_PREP(RZG3S_PCI_MSIRCVWMSKL_MASK,
+ RZG3S_PCI_MSI_INT_NR - 1),
+ host->axi + RZG3S_PCI_MSIRCVWMSKL);
+
+ /* Set MSI window address and enable MSI window */
+ writel_relaxed(upper_32_bits(msi->window_base),
+ host->axi + RZG3S_PCI_MSIRCVWADRU);
+ writel_relaxed(lower_32_bits(msi->window_base) |
+ RZG3S_PCI_MSIRCVWADRL_ENA |
+ RZG3S_PCI_MSIRCVWADRL_MSG_DATA_ENA,
+ host->axi + RZG3S_PCI_MSIRCVWADRL);
+
+ /* Set MSI receive enable */
+ for (u8 reg_id = 0; reg_id < regs; reg_id++) {
+ writel_relaxed(RZG3S_PCI_MSIRE_ENA,
+ host->axi + RZG3S_PCI_MSIRE(reg_id));
+ }
+
+ /* Enable message receive interrupts */
+ writel_relaxed(RZG3S_PCI_MSGRCVIE_MSG_RCV,
+ host->axi + RZG3S_PCI_MSGRCVIE);
+
+ /* Enable MSI */
+ rzg3s_pcie_update_bits(host->axi, RZG3S_PCI_PINTRCVIE,
+ RZG3S_PCI_PINTRCVIE_MSI,
+ RZG3S_PCI_PINTRCVIE_MSI);
+
+ return 0;
+}
+
+static int rzg3s_pcie_msi_setup(struct rzg3s_pcie_host *host)
+{
+ size_t size = RZG3S_PCI_MSI_INT_NR * sizeof(u32);
+ struct rzg3s_pcie_msi *msi = &host->msi;
+ struct device *dev = host->dev;
+ int id, ret;
+
+ msi->pages = __get_free_pages(GFP_KERNEL | GFP_DMA, 0);
+ if (!msi->pages)
+ return -ENOMEM;
+
+ msi->dma_addr = dma_map_single(dev, (void *)msi->pages, size * 2,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, msi->dma_addr)) {
+ ret = -ENOMEM;
+ goto free_pages;
+ }
+
+ /*
+ * According to the RZ/G3S HW manual (Rev.1.10, section 34.4.5.2 Setting
+ * the MSI Window) the MSI window needs to fall within one of the
+	 * enabled AXI windows. Find an enabled AXI window to set up the MSI
+ * window.
+ */
+ for (id = 0; id < RZG3S_MAX_WINDOWS; id++) {
+ u64 base, basel, baseu;
+ u64 mask, maskl, masku;
+
+ basel = readl_relaxed(host->axi + RZG3S_PCI_AWBASEL(id));
+ /* Skip checking this AXI window if it's not enabled */
+ if (!(basel & RZG3S_PCI_AWBASEL_WIN_ENA))
+ continue;
+
+ baseu = readl_relaxed(host->axi + RZG3S_PCI_AWBASEU(id));
+ base = baseu << 32 | basel;
+
+ maskl = readl_relaxed(host->axi + RZG3S_PCI_AWMASKL(id));
+ masku = readl_relaxed(host->axi + RZG3S_PCI_AWMASKU(id));
+ mask = masku << 32 | maskl;
+
+ if (msi->dma_addr < base || msi->dma_addr > base + mask)
+ continue;
+
+ break;
+ }
+
+ if (id == RZG3S_MAX_WINDOWS) {
+ ret = -EINVAL;
+ goto dma_unmap;
+ }
+
+ /* The MSI base address must be aligned to the MSI size */
+ msi->window_base = ALIGN(msi->dma_addr, size);
+ if (msi->window_base < msi->dma_addr) {
+ ret = -EINVAL;
+ goto dma_unmap;
+ }
+
+ rzg3s_pcie_msi_hw_setup(host);
+
+ return 0;
+
+dma_unmap:
+ dma_unmap_single(dev, msi->dma_addr, size * 2, DMA_BIDIRECTIONAL);
+free_pages:
+ free_pages(msi->pages, 0);
+ return ret;
+}
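
The size * 2 mapping above is what guarantees an aligned MSI window can always be carved out of the buffer. With the current constants:

/*
 * size = RZG3S_PCI_MSI_INT_NR * sizeof(u32) = 32 * 4 = 128 bytes.
 * Mapping 2 * size = 256 bytes means ALIGN(dma_addr, 128) plus the
 * following 128 bytes always fit inside the mapping, whatever the
 * alignment of dma_addr (here the page-backed buffer is already page
 * aligned, so window_base normally equals dma_addr).
 */
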
+
+static void rzg3s_pcie_msi_hw_teardown(struct rzg3s_pcie_host *host)
+{
+ u8 regs = RZG3S_PCI_MSI_INT_NR / RZG3S_PCI_MSI_INT_PER_REG;
+
+ /* Disable MSI */
+ rzg3s_pcie_update_bits(host->axi, RZG3S_PCI_PINTRCVIE,
+ RZG3S_PCI_PINTRCVIE_MSI, 0);
+
+ /* Disable message receive interrupts */
+ rzg3s_pcie_update_bits(host->axi, RZG3S_PCI_MSGRCVIE,
+ RZG3S_PCI_MSGRCVIE_MSG_RCV, 0);
+
+ /* Disable MSI receive enable */
+ for (u8 reg_id = 0; reg_id < regs; reg_id++)
+ writel_relaxed(0, host->axi + RZG3S_PCI_MSIRE(reg_id));
+
+ /* Disable MSI window */
+ writel_relaxed(0, host->axi + RZG3S_PCI_MSIRCVWADRL);
+}
+
+static void rzg3s_pcie_teardown_msi(struct rzg3s_pcie_host *host)
+{
+ size_t size = RZG3S_PCI_MSI_INT_NR * sizeof(u32);
+ struct rzg3s_pcie_msi *msi = &host->msi;
+
+ rzg3s_pcie_msi_hw_teardown(host);
+
+ free_irq(msi->irq, host);
+ irq_domain_remove(msi->domain);
+
+ /* Free unused memory */
+ dma_unmap_single(host->dev, msi->dma_addr, size * 2, DMA_BIDIRECTIONAL);
+ free_pages(msi->pages, 0);
+}
+
+static int rzg3s_pcie_init_msi(struct rzg3s_pcie_host *host)
+{
+ struct platform_device *pdev = to_platform_device(host->dev);
+ struct rzg3s_pcie_msi *msi = &host->msi;
+ struct device *dev = host->dev;
+ const char *devname;
+ int ret;
+
+ ret = devm_mutex_init(dev, &msi->map_lock);
+ if (ret)
+ return ret;
+
+ msi->irq = platform_get_irq_byname(pdev, "msi");
+ if (msi->irq < 0)
+ return dev_err_probe(dev, msi->irq, "Failed to get MSI IRQ!\n");
+
+ devname = devm_kasprintf(dev, GFP_KERNEL, "%s-msi", dev_name(dev));
+ if (!devname)
+ return -ENOMEM;
+
+ ret = rzg3s_pcie_msi_allocate_domains(msi);
+ if (ret)
+ return ret;
+
+ /*
+ * Don't use devm_request_irq() as the driver uses non-devm helpers
+ * to control clocks. Mixing them may lead to subtle bugs.
+ */
+ ret = request_irq(msi->irq, rzg3s_pcie_msi_irq, 0, devname, host);
+ if (ret) {
+ dev_err_probe(dev, ret, "Failed to request IRQ: %d\n", ret);
+ goto free_domains;
+ }
+
+ ret = rzg3s_pcie_msi_setup(host);
+ if (ret) {
+ dev_err_probe(dev, ret, "Failed to setup MSI!\n");
+ goto free_irq;
+ }
+
+ return 0;
+
+free_irq:
+ free_irq(msi->irq, host);
+free_domains:
+ irq_domain_remove(msi->domain);
+ return ret;
+}
+
+static void rzg3s_pcie_intx_irq_ack(struct irq_data *d)
+{
+ struct rzg3s_pcie_host *host = irq_data_get_irq_chip_data(d);
+
+ guard(raw_spinlock_irqsave)(&host->hw_lock);
+
+ rzg3s_pcie_update_bits(host->axi, RZG3S_PCI_PINTRCVIS,
+ RZG3S_PCI_PINTRCVIS_INTX(d->hwirq),
+ RZG3S_PCI_PINTRCVIS_INTX(d->hwirq));
+}
+
+static void rzg3s_pcie_intx_irq_mask(struct irq_data *d)
+{
+ struct rzg3s_pcie_host *host = irq_data_get_irq_chip_data(d);
+
+ guard(raw_spinlock_irqsave)(&host->hw_lock);
+
+ rzg3s_pcie_update_bits(host->axi, RZG3S_PCI_PINTRCVIE,
+ RZG3S_PCI_PINTRCVIE_INTX(d->hwirq), 0);
+}
+
+static void rzg3s_pcie_intx_irq_unmask(struct irq_data *d)
+{
+ struct rzg3s_pcie_host *host = irq_data_get_irq_chip_data(d);
+
+ guard(raw_spinlock_irqsave)(&host->hw_lock);
+
+ rzg3s_pcie_update_bits(host->axi, RZG3S_PCI_PINTRCVIE,
+ RZG3S_PCI_PINTRCVIE_INTX(d->hwirq),
+ RZG3S_PCI_PINTRCVIE_INTX(d->hwirq));
+}
+
+static struct irq_chip rzg3s_pcie_intx_irq_chip = {
+ .name = "PCIe INTx",
+ .irq_ack = rzg3s_pcie_intx_irq_ack,
+ .irq_mask = rzg3s_pcie_intx_irq_mask,
+ .irq_unmask = rzg3s_pcie_intx_irq_unmask,
+};
+
+static int rzg3s_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &rzg3s_pcie_intx_irq_chip,
+ handle_level_irq);
+ irq_set_chip_data(irq, domain->host_data);
+
+ return 0;
+}
+
+static const struct irq_domain_ops rzg3s_pcie_intx_domain_ops = {
+ .map = rzg3s_pcie_intx_map,
+ .xlate = irq_domain_xlate_onetwocell,
+};
+
+static int rzg3s_pcie_init_irqdomain(struct rzg3s_pcie_host *host)
+{
+ struct device *dev = host->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+
+ for (int i = 0; i < PCI_NUM_INTX; i++) {
+ char irq_name[5] = {0};
+ int irq;
+
+ scnprintf(irq_name, ARRAY_SIZE(irq_name), "int%c", 'a' + i);
+
+ irq = platform_get_irq_byname(pdev, irq_name);
+ if (irq < 0)
+ return dev_err_probe(dev, -EINVAL,
+ "Failed to parse and map INT%c IRQ\n",
+ 'A' + i);
+
+ host->intx_irqs[i] = irq;
+ irq_set_chained_handler_and_data(irq,
+ rzg3s_pcie_intx_irq_handler,
+ host);
+ }
+
+ host->intx_domain = irq_domain_create_linear(dev_fwnode(dev),
+ PCI_NUM_INTX,
+ &rzg3s_pcie_intx_domain_ops,
+ host);
+ if (!host->intx_domain)
+ return dev_err_probe(dev, -EINVAL,
+ "Failed to add irq domain for INTx IRQs\n");
+ irq_domain_update_bus_token(host->intx_domain, DOMAIN_BUS_WIRED);
+
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ int ret = rzg3s_pcie_init_msi(host);
+
+ if (ret) {
+ irq_domain_remove(host->intx_domain);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void rzg3s_pcie_teardown_irqdomain(struct rzg3s_pcie_host *host)
+{
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ rzg3s_pcie_teardown_msi(host);
+
+ irq_domain_remove(host->intx_domain);
+}
+
+static int rzg3s_pcie_set_max_link_speed(struct rzg3s_pcie_host *host)
+{
+ u32 remote_supported_link_speeds, max_supported_link_speeds;
+ u32 cs2, tmp, pcie_cap = RZG3S_PCI_CFG_PCIEC;
+ u32 cur_link_speed, link_speed;
+ u8 ltssm_state_l0 = 0xc;
+ int ret;
+ u16 ls;
+
+ /*
+ * According to the RZ/G3S HW manual (Rev.1.10, section 34.6.3 Caution
+ * when Changing the Speed Spontaneously) link speed change can be done
+ * only when the LTSSM is in L0.
+ */
+ ret = readl_poll_timeout(host->axi + RZG3S_PCI_PCSTAT1, tmp,
+ FIELD_GET(RZG3S_PCI_PCSTAT1_LTSSM_STATE, tmp) == ltssm_state_l0,
+ PCIE_LINK_WAIT_SLEEP_MS * MILLI,
+ PCIE_LINK_WAIT_SLEEP_MS * MILLI *
+ PCIE_LINK_WAIT_MAX_RETRIES);
+ if (ret)
+ return ret;
+
+ ls = readw_relaxed(host->pcie + pcie_cap + PCI_EXP_LNKSTA);
+ cs2 = readl_relaxed(host->axi + RZG3S_PCI_PCSTAT2);
+
+ switch (pcie_link_speed[host->max_link_speed]) {
+ case PCIE_SPEED_5_0GT:
+ max_supported_link_speeds = GENMASK(PCI_EXP_LNKSTA_CLS_5_0GB - 1, 0);
+ link_speed = PCI_EXP_LNKCTL2_TLS_5_0GT;
+ break;
+ default:
+ /* Should not happen */
+ return -EINVAL;
+ }
+
+ cur_link_speed = FIELD_GET(PCI_EXP_LNKSTA_CLS, ls);
+ remote_supported_link_speeds = FIELD_GET(RZG3S_PCI_PCSTAT2_SDRIRE, cs2);
+ /* Drop reserved bits */
+ remote_supported_link_speeds &= max_supported_link_speeds;
+
+ /*
+ * Return if max link speed is already set or the connected device
+ * doesn't support it.
+ */
+ if (cur_link_speed == host->max_link_speed ||
+ remote_supported_link_speeds != max_supported_link_speeds)
+ return 0;
+
+ /* Set target Link speed */
+ rzg3s_pcie_update_bits(host->pcie, pcie_cap + PCI_EXP_LNKCTL2,
+ PCI_EXP_LNKCTL2_TLS,
+ FIELD_PREP(PCI_EXP_LNKCTL2_TLS, link_speed));
+
+ /* Request link speed change */
+ rzg3s_pcie_update_bits(host->axi, RZG3S_PCI_PCCTRL2,
+ RZG3S_PCI_PCCTRL2_LS_CHG_REQ |
+ RZG3S_PCI_PCCTRL2_LS_CHG,
+ RZG3S_PCI_PCCTRL2_LS_CHG_REQ |
+ FIELD_PREP(RZG3S_PCI_PCCTRL2_LS_CHG,
+ link_speed - 1));
+
+ ret = readl_poll_timeout(host->axi + RZG3S_PCI_PCSTAT2, cs2,
+ (cs2 & RZG3S_PCI_PCSTAT2_LS_CHG_DONE),
+ PCIE_LINK_WAIT_SLEEP_MS * MILLI,
+ PCIE_LINK_WAIT_SLEEP_MS * MILLI *
+ PCIE_LINK_WAIT_MAX_RETRIES);
+
+ /*
+ * According to the RZ/G3S HW manual (Rev.1.10, section 34.6.3 Caution
+ * when Changing the Speed Spontaneously) the PCI_PCCTRL2_LS_CHG_REQ
+ * should be de-asserted after checking for PCI_PCSTAT2_LS_CHG_DONE.
+ */
+ rzg3s_pcie_update_bits(host->axi, RZG3S_PCI_PCCTRL2,
+ RZG3S_PCI_PCCTRL2_LS_CHG_REQ, 0);
+
+ return ret;
+}
+
+static int rzg3s_pcie_config_init(struct rzg3s_pcie_host *host)
+{
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(host);
+ struct resource_entry *ft;
+ struct resource *bus;
+ u8 subordinate_bus;
+ u8 secondary_bus;
+ u8 primary_bus;
+
+ ft = resource_list_first_type(&bridge->windows, IORESOURCE_BUS);
+ if (!ft)
+ return -ENODEV;
+
+ bus = ft->res;
+ primary_bus = bus->start;
+ secondary_bus = bus->start + 1;
+ subordinate_bus = bus->end;
+
+ /* Enable access control to the CFGU */
+ writel_relaxed(RZG3S_PCI_PERM_CFG_HWINIT_EN,
+ host->axi + RZG3S_PCI_PERM);
+
+	/* HW manual recommends writing 0xffffffff on initialization */
+ writel_relaxed(0xffffffff, host->pcie + RZG3S_PCI_CFG_BARMSK00L);
+ writel_relaxed(0xffffffff, host->pcie + RZG3S_PCI_CFG_BARMSK00U);
+
+ /* Update bus info */
+ writeb_relaxed(primary_bus, host->pcie + PCI_PRIMARY_BUS);
+ writeb_relaxed(secondary_bus, host->pcie + PCI_SECONDARY_BUS);
+ writeb_relaxed(subordinate_bus, host->pcie + PCI_SUBORDINATE_BUS);
+
+ /* Disable access control to the CFGU */
+ writel_relaxed(0, host->axi + RZG3S_PCI_PERM);
+
+ return 0;
+}
+
+static void rzg3s_pcie_irq_init(struct rzg3s_pcie_host *host)
+{
+ /*
+ * According to the HW manual of the RZ/G3S (Rev.1.10, sections
+ * corresponding to all registers written with ~0U), the hardware
+ * ignores value written to unused bits. Writing ~0U to these registers
+ * should be safe.
+ */
+
+ /* Clear the link state and PM transitions */
+ writel_relaxed(RZG3S_PCI_PEIS0_DL_UPDOWN |
+ RZG3S_PCI_PEIS0_RX_DLLP_PM_ENTER,
+ host->axi + RZG3S_PCI_PEIS0);
+
+ /* Disable all interrupts */
+ writel_relaxed(0, host->axi + RZG3S_PCI_PEIE0);
+
+ /* Clear all parity and ecc error interrupts */
+ writel_relaxed(~0U, host->axi + RZG3S_PCI_PEIS1);
+
+ /* Disable all parity and ecc error interrupts */
+ writel_relaxed(0, host->axi + RZG3S_PCI_PEIE1);
+
+ /* Clear all AXI master error interrupts */
+ writel_relaxed(~0U, host->axi + RZG3S_PCI_AMEIS);
+
+ /* Clear all AXI slave error interrupts */
+ writel_relaxed(~0U, host->axi + RZG3S_PCI_ASEIS1);
+
+ /* Clear all message receive interrupts */
+ writel_relaxed(~0U, host->axi + RZG3S_PCI_MSGRCVIS);
+}
+
+static int rzg3s_pcie_power_resets_deassert(struct rzg3s_pcie_host *host)
+{
+ const struct rzg3s_pcie_soc_data *data = host->data;
+
+ /*
+ * According to the RZ/G3S HW manual (Rev.1.10, section
+ * 34.5.1.2 De-asserting the Reset) the PCIe IP needs to wait 5ms from
+ * power on to the de-assertion of reset.
+ */
+ fsleep(5000);
+ return reset_control_bulk_deassert(data->num_power_resets,
+ host->power_resets);
+}
+
+static int rzg3s_pcie_resets_prepare_and_get(struct rzg3s_pcie_host *host)
+{
+ const struct rzg3s_pcie_soc_data *data = host->data;
+ unsigned int i;
+ int ret;
+
+ host->power_resets = devm_kmalloc_array(host->dev,
+ data->num_power_resets,
+ sizeof(*host->power_resets),
+ GFP_KERNEL);
+ if (!host->power_resets)
+ return -ENOMEM;
+
+ for (i = 0; i < data->num_power_resets; i++)
+ host->power_resets[i].id = data->power_resets[i];
+
+ host->cfg_resets = devm_kmalloc_array(host->dev,
+ data->num_cfg_resets,
+ sizeof(*host->cfg_resets),
+ GFP_KERNEL);
+ if (!host->cfg_resets)
+ return -ENOMEM;
+
+ for (i = 0; i < data->num_cfg_resets; i++)
+ host->cfg_resets[i].id = data->cfg_resets[i];
+
+ ret = devm_reset_control_bulk_get_exclusive(host->dev,
+ data->num_power_resets,
+ host->power_resets);
+ if (ret)
+ return ret;
+
+ return devm_reset_control_bulk_get_exclusive(host->dev,
+ data->num_cfg_resets,
+ host->cfg_resets);
+}
+
+static int rzg3s_pcie_host_parse_port(struct rzg3s_pcie_host *host)
+{
+ struct device_node *of_port = of_get_next_child(host->dev->of_node, NULL);
+ struct rzg3s_pcie_port *port = &host->port;
+ int ret;
+
+ ret = of_property_read_u32(of_port, "vendor-id", &port->vendor_id);
+ if (ret)
+ return ret;
+
+ ret = of_property_read_u32(of_port, "device-id", &port->device_id);
+ if (ret)
+ return ret;
+
+ port->refclk = of_clk_get_by_name(of_port, "ref");
+ if (IS_ERR(port->refclk))
+ return PTR_ERR(port->refclk);
+
+ return 0;
+}
+
+static int rzg3s_pcie_host_init_port(struct rzg3s_pcie_host *host)
+{
+ struct rzg3s_pcie_port *port = &host->port;
+ struct device *dev = host->dev;
+ int ret;
+
+ /* Enable access control to the CFGU */
+ writel_relaxed(RZG3S_PCI_PERM_CFG_HWINIT_EN,
+ host->axi + RZG3S_PCI_PERM);
+
+ /* Update vendor ID and device ID */
+ writew_relaxed(port->vendor_id, host->pcie + PCI_VENDOR_ID);
+ writew_relaxed(port->device_id, host->pcie + PCI_DEVICE_ID);
+
+ /* Disable access control to the CFGU */
+ writel_relaxed(0, host->axi + RZG3S_PCI_PERM);
+
+ ret = clk_prepare_enable(port->refclk);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to enable refclk!\n");
+
+ /* Initialize the PHY, if any */
+ if (host->data->init_phy) {
+ ret = host->data->init_phy(host);
+ if (ret) {
+ dev_err_probe(dev, ret, "Failed to initialize the PHY!\n");
+ goto refclk_disable;
+ }
+ }
+
+ return 0;
+
+refclk_disable:
+ clk_disable_unprepare(port->refclk);
+ return ret;
+}
+
+static int rzg3s_pcie_host_init(struct rzg3s_pcie_host *host)
+{
+ u32 val;
+ int ret;
+
+ /* Initialize the PCIe related registers */
+ ret = rzg3s_pcie_config_init(host);
+ if (ret)
+ return ret;
+
+ ret = rzg3s_pcie_host_init_port(host);
+ if (ret)
+ return ret;
+
+ /* Initialize the interrupts */
+ rzg3s_pcie_irq_init(host);
+
+ ret = reset_control_bulk_deassert(host->data->num_cfg_resets,
+ host->cfg_resets);
+ if (ret)
+ goto disable_port_refclk;
+
+ /* Wait for link up */
+ ret = readl_poll_timeout(host->axi + RZG3S_PCI_PCSTAT1, val,
+ !(val & RZG3S_PCI_PCSTAT1_DL_DOWN_STS),
+ PCIE_LINK_WAIT_SLEEP_MS * MILLI,
+ PCIE_LINK_WAIT_SLEEP_MS * MILLI *
+ PCIE_LINK_WAIT_MAX_RETRIES);
+ if (ret)
+ goto cfg_resets_deassert;
+
+ val = readl_relaxed(host->axi + RZG3S_PCI_PCSTAT2);
+ dev_info(host->dev, "PCIe link status [0x%x]\n", val);
+
+ return 0;
+
+cfg_resets_deassert:
+ reset_control_bulk_assert(host->data->num_cfg_resets,
+ host->cfg_resets);
+disable_port_refclk:
+ clk_disable_unprepare(host->port.refclk);
+ return ret;
+}
+
+static void rzg3s_pcie_set_inbound_window(struct rzg3s_pcie_host *host,
+ u64 cpu_addr, u64 pci_addr, u64 size,
+ int id)
+{
+ /* Set CPU window base address */
+ writel_relaxed(upper_32_bits(cpu_addr),
+ host->axi + RZG3S_PCI_ADESTU(id));
+ writel_relaxed(lower_32_bits(cpu_addr),
+ host->axi + RZG3S_PCI_ADESTL(id));
+
+ /* Set window size */
+ writel_relaxed(upper_32_bits(size), host->axi + RZG3S_PCI_AWMASKU(id));
+ writel_relaxed(lower_32_bits(size), host->axi + RZG3S_PCI_AWMASKL(id));
+
+ /* Set PCIe window base address and enable the window */
+ writel_relaxed(upper_32_bits(pci_addr),
+ host->axi + RZG3S_PCI_AWBASEU(id));
+ writel_relaxed(lower_32_bits(pci_addr) | RZG3S_PCI_AWBASEL_WIN_ENA,
+ host->axi + RZG3S_PCI_AWBASEL(id));
+}
+
+static int rzg3s_pcie_set_inbound_windows(struct rzg3s_pcie_host *host,
+ struct resource_entry *entry,
+ int *index)
+{
+ u64 pci_addr = entry->res->start - entry->offset;
+ u64 cpu_addr = entry->res->start;
+ u64 cpu_end = entry->res->end;
+ u64 size_id = 0;
+ int id = *index;
+ u64 size;
+
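+ /*
+ * A dma-range may need several windows: each window size is rounded
+ * up to a power of two below, so keep mapping with consecutive
+ * windows until the whole CPU range is covered.
+ */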
+ while (cpu_addr < cpu_end) {
+ if (id >= RZG3S_MAX_WINDOWS)
+ return dev_err_probe(host->dev, -ENOSPC,
+ "Failed to map inbound window for resource (%s)\n",
+ entry->res->name);
+
+ size = resource_size(entry->res) - size_id;
+
+ /*
+ * According to the RZ/G3S HW manual (Rev.1.10, section 34.3.1.71
+ * AXI Window Mask (Lower) Registers) the minimum window size is 4K.
+ */
+ size = max(size, SZ_4K);
+
+ /*
+ * According to the RZ/G3S HW manual (Rev.1.10, sections:
+ * - 34.3.1.69 AXI Window Base (Lower) Registers
+ * - 34.3.1.71 AXI Window Mask (Lower) Registers
+ * - 34.3.1.73 AXI Destination (Lower) Registers)
+ * the CPU address, PCIe address and size must be 4K aligned, and
+ * the size must be a power of 2.
+ */
+ size = ALIGN(size, SZ_4K);
+ size = roundup_pow_of_two(size);
+
+ cpu_addr = ALIGN(cpu_addr, SZ_4K);
+ pci_addr = ALIGN(pci_addr, SZ_4K);
+
+ /*
+ * According to the RZ/G3S HW manual (Rev.1.10, section 34.3.1.71
+ * AXI Window Mask (Lower) Registers) the HW expects the 12 LSBs of
+ * the mask to be 0xfff, so subtract 1 from the size.
+ */
+ rzg3s_pcie_set_inbound_window(host, cpu_addr, pci_addr,
+ size - 1, id);
+
+ pci_addr += size;
+ cpu_addr += size;
+ size_id = size;
+ id++;
+ }
+ *index = id;
+
+ return 0;
+}
+
+static int rzg3s_pcie_parse_map_dma_ranges(struct rzg3s_pcie_host *host)
+{
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(host);
+ struct resource_entry *entry;
+ int i = 0, ret;
+
+ resource_list_for_each_entry(entry, &bridge->dma_ranges) {
+ ret = rzg3s_pcie_set_inbound_windows(host, entry, &i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void rzg3s_pcie_set_outbound_window(struct rzg3s_pcie_host *host,
+ struct resource_entry *win, int id)
+{
+ struct resource *res = win->res;
+ resource_size_t size = resource_size(res);
+ resource_size_t res_start;
+
+ if (res->flags & IORESOURCE_IO)
+ res_start = pci_pio_to_address(res->start) - win->offset;
+ else
+ res_start = res->start - win->offset;
+
+ /*
+ * According to the RZ/G3S HW manual (Rev.1.10, section 34.3.1.75 PCIe
+ * Window Base (Lower) Registers) the window base address needs to be
+ * 4K aligned.
+ */
+ res_start = ALIGN(res_start, SZ_4K);
+
+ size = ALIGN(size, SZ_4K);
+ size = roundup_pow_of_two(size) - 1;
+
+ /* Set PCIe destination */
+ writel_relaxed(upper_32_bits(res_start),
+ host->axi + RZG3S_PCI_PDESTU(id));
+ writel_relaxed(lower_32_bits(res_start),
+ host->axi + RZG3S_PCI_PDESTL(id));
+
+ /* Set PCIe window mask */
+ writel_relaxed(upper_32_bits(size), host->axi + RZG3S_PCI_PWMASKU(id));
+ writel_relaxed(lower_32_bits(size), host->axi + RZG3S_PCI_PWMASKL(id));
+
+ /* Set PCIe window base and enable the window */
+ writel_relaxed(upper_32_bits(res_start),
+ host->axi + RZG3S_PCI_PWBASEU(id));
+ writel_relaxed(lower_32_bits(res_start) | RZG3S_PCI_PWBASEL_ENA,
+ host->axi + RZG3S_PCI_PWBASEL(id));
+}
+
+static int rzg3s_pcie_parse_map_ranges(struct rzg3s_pcie_host *host)
+{
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(host);
+ struct resource_entry *win;
+ int i = 0;
+
+ resource_list_for_each_entry(win, &bridge->windows) {
+ struct resource *res = win->res;
+
+ if (i >= RZG3S_MAX_WINDOWS)
+ return dev_err_probe(host->dev, -ENOSPC,
+ "Failed to map outbound window for resource (%s)\n",
+ res->name);
+
+ if (!res->flags)
+ continue;
+
+ switch (resource_type(res)) {
+ case IORESOURCE_IO:
+ case IORESOURCE_MEM:
+ rzg3s_pcie_set_outbound_window(host, win, i);
+ i++;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int rzg3s_soc_pcie_init_phy(struct rzg3s_pcie_host *host)
+{
+ static const u32 xcfgd_settings[RZG3S_PCI_PHY_XCFGD_NUM] = {
+ [8] = 0xe0006801, 0x007f7e30, 0x183e0000, 0x978ff500,
+ 0xec000000, 0x009f1400, 0x0000d009,
+ [17] = 0x78000000,
+ [19] = 0x00880000, 0x000005c0, 0x07000000, 0x00780920,
+ 0xc9400ce2, 0x90000c0c, 0x000c1414, 0x00005034,
+ 0x00006000, 0x00000001,
+ };
+ static const u32 xcfga_cmn_settings[RZG3S_PCI_PHY_XCFGA_CMN_NUM] = {
+ 0x00000d10, 0x08310100, 0x00c21404, 0x013c0010, 0x01874440,
+ 0x1a216082, 0x00103440, 0x00000080, 0x00000010, 0x0c1000c1,
+ 0x1000c100, 0x0222000c, 0x00640019, 0x00a00028, 0x01d11228,
+ 0x0201001d,
+ };
+ static const u32 xcfga_rx_settings[RZG3S_PCI_PHY_XCFGA_RX_NUM] = {
+ 0x07d55000, 0x030e3f00, 0x00000288, 0x102c5880, 0x0000000b,
+ 0x04141441, 0x00641641, 0x00d63d63, 0x00641641, 0x01970377,
+ 0x00190287, 0x00190028, 0x00000028,
+ };
+ unsigned int i;
+
+ /*
+ * Enable access permission for physical layer control and status
+ * registers.
+ */
+ writel_relaxed(RZG3S_PCI_PERM_PIPE_PHY_REG_EN,
+ host->axi + RZG3S_PCI_PERM);
+
+ for (i = 0; i < RZG3S_PCI_PHY_XCFGD_NUM; i++) {
+ writel_relaxed(xcfgd_settings[i],
+ host->axi + RZG3S_PCI_PHY_XCFGD(i));
+ }
+
+ for (i = 0; i < RZG3S_PCI_PHY_XCFGA_CMN_NUM; i++) {
+ writel_relaxed(xcfga_cmn_settings[i],
+ host->axi + RZG3S_PCI_PHY_XCFGA_CMN(i));
+ }
+
+ for (i = 0; i < RZG3S_PCI_PHY_XCFGA_RX_NUM; i++) {
+ writel_relaxed(xcfga_rx_settings[i],
+ host->axi + RZG3S_PCI_PHY_XCFGA_RX(i));
+ }
+
+ writel_relaxed(0x107, host->axi + RZG3S_PCI_PHY_XCFGA_TX);
+
+ /* Select the PHY settings programmed above */
+ writel_relaxed(RZG3S_PCI_PHY_XCFG_CTRL_PHYREG_SEL,
+ host->axi + RZG3S_PCI_PHY_XCFG_CTRL);
+
+ /*
+ * Disable access permission for physical layer control and status
+ * registers.
+ */
+ writel_relaxed(0, host->axi + RZG3S_PCI_PERM);
+
+ return 0;
+}
+
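+/*
+ * Common HW bring-up shared by probe and resume. Callers pass the
+ * IRQ domain init/teardown callbacks matching their context.
+ */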
+static int
+rzg3s_pcie_host_setup(struct rzg3s_pcie_host *host,
+ int (*init_irqdomain)(struct rzg3s_pcie_host *host),
+ void (*teardown_irqdomain)(struct rzg3s_pcie_host *host))
+{
+ struct device *dev = host->dev;
+ int ret;
+
+ /* Set inbound windows */
+ ret = rzg3s_pcie_parse_map_dma_ranges(host);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to set inbound windows!\n");
+
+ /* Set outbound windows */
+ ret = rzg3s_pcie_parse_map_ranges(host);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to set outbound windows!\n");
+
+ ret = init_irqdomain(host);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to init IRQ domain\n");
+
+ ret = rzg3s_pcie_host_init(host);
+ if (ret) {
+ dev_err_probe(dev, ret, "Failed to initialize the HW!\n");
+ goto teardown_irqdomain;
+ }
+
+ ret = rzg3s_pcie_set_max_link_speed(host);
+ if (ret)
+ dev_info(dev, "Failed to set max link speed\n");
+
+ msleep(PCIE_RESET_CONFIG_WAIT_MS);
+
+ return 0;
+
+teardown_irqdomain:
+ teardown_irqdomain(host);
+
+ return ret;
+}
+
+static int rzg3s_pcie_probe(struct platform_device *pdev)
+{
+ struct pci_host_bridge *bridge;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct device_node *sysc_np __free(device_node) =
+ of_parse_phandle(np, "renesas,sysc", 0);
+ struct rzg3s_pcie_host *host;
+ int ret;
+
+ bridge = devm_pci_alloc_host_bridge(dev, sizeof(*host));
+ if (!bridge)
+ return -ENOMEM;
+
+ host = pci_host_bridge_priv(bridge);
+ host->dev = dev;
+ host->data = device_get_match_data(dev);
+ platform_set_drvdata(pdev, host);
+
+ host->axi = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(host->axi))
+ return PTR_ERR(host->axi);
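+ /* The root port's own config space (CFGU) is mapped within the AXI window */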
+ host->pcie = host->axi + RZG3S_PCI_CFG_BASE;
+
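+ /* Default to Gen2 if "max-link-speed" is absent or invalid */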
+ host->max_link_speed = of_pci_get_max_link_speed(np);
+ if (host->max_link_speed < 0)
+ host->max_link_speed = 2;
+
+ ret = rzg3s_pcie_host_parse_port(host);
+ if (ret)
+ return ret;
+
+ host->sysc = syscon_node_to_regmap(sysc_np);
+ if (IS_ERR(host->sysc)) {
+ ret = PTR_ERR(host->sysc);
+ goto port_refclk_put;
+ }
+
+ ret = regmap_update_bits(host->sysc, RZG3S_SYS_PCIE_RST_RSM_B,
+ RZG3S_SYS_PCIE_RST_RSM_B_MASK,
+ FIELD_PREP(RZG3S_SYS_PCIE_RST_RSM_B_MASK, 1));
+ if (ret)
+ goto port_refclk_put;
+
+ ret = rzg3s_pcie_resets_prepare_and_get(host);
+ if (ret)
+ goto sysc_signal_restore;
+
+ ret = rzg3s_pcie_power_resets_deassert(host);
+ if (ret)
+ goto sysc_signal_restore;
+
+ pm_runtime_enable(dev);
+
+ /*
+ * Controller clocks are part of a clock power domain. Enable them
+ * through runtime PM.
+ */
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ goto rpm_disable;
+
+ raw_spin_lock_init(&host->hw_lock);
+
+ ret = rzg3s_pcie_host_setup(host, rzg3s_pcie_init_irqdomain,
+ rzg3s_pcie_teardown_irqdomain);
+ if (ret)
+ goto rpm_put;
+
+ bridge->sysdata = host;
+ bridge->ops = &rzg3s_pcie_root_ops;
+ bridge->child_ops = &rzg3s_pcie_child_ops;
+ ret = pci_host_probe(bridge);
+ if (ret)
+ goto host_probe_teardown;
+
+ return 0;
+
+host_probe_teardown:
+ rzg3s_pcie_teardown_irqdomain(host);
+ reset_control_bulk_deassert(host->data->num_cfg_resets,
+ host->cfg_resets);
+rpm_put:
+ pm_runtime_put_sync(dev);
+rpm_disable:
+ pm_runtime_disable(dev);
+ reset_control_bulk_assert(host->data->num_power_resets,
+ host->power_resets);
+sysc_signal_restore:
+ /*
+ * The SYSC RST_RSM_B signal needs to be asserted before turning off
+ * the power to the PHY.
+ */
+ regmap_update_bits(host->sysc, RZG3S_SYS_PCIE_RST_RSM_B,
+ RZG3S_SYS_PCIE_RST_RSM_B_MASK,
+ FIELD_PREP(RZG3S_SYS_PCIE_RST_RSM_B_MASK, 0));
+port_refclk_put:
+ clk_put(host->port.refclk);
+
+ return ret;
+}
+
+static int rzg3s_pcie_suspend_noirq(struct device *dev)
+{
+ struct rzg3s_pcie_host *host = dev_get_drvdata(dev);
+ const struct rzg3s_pcie_soc_data *data = host->data;
+ struct rzg3s_pcie_port *port = &host->port;
+ struct regmap *sysc = host->sysc;
+ int ret;
+
+ ret = pm_runtime_put_sync(dev);
+ if (ret)
+ return ret;
+
+ clk_disable_unprepare(port->refclk);
+
+ ret = reset_control_bulk_assert(data->num_power_resets,
+ host->power_resets);
+ if (ret)
+ goto refclk_restore;
+
+ ret = reset_control_bulk_assert(data->num_cfg_resets,
+ host->cfg_resets);
+ if (ret)
+ goto power_resets_restore;
+
+ ret = regmap_update_bits(sysc, RZG3S_SYS_PCIE_RST_RSM_B,
+ RZG3S_SYS_PCIE_RST_RSM_B_MASK,
+ FIELD_PREP(RZG3S_SYS_PCIE_RST_RSM_B_MASK, 0));
+ if (ret)
+ goto cfg_resets_restore;
+
+ return 0;
+
+ /* Restore the previous state if any error happens */
+cfg_resets_restore:
+ reset_control_bulk_deassert(data->num_cfg_resets,
+ host->cfg_resets);
+power_resets_restore:
+ reset_control_bulk_deassert(data->num_power_resets,
+ host->power_resets);
+refclk_restore:
+ clk_prepare_enable(port->refclk);
+ pm_runtime_resume_and_get(dev);
+ return ret;
+}
+
+static int rzg3s_pcie_resume_noirq(struct device *dev)
+{
+ struct rzg3s_pcie_host *host = dev_get_drvdata(dev);
+ const struct rzg3s_pcie_soc_data *data = host->data;
+ struct regmap *sysc = host->sysc;
+ int ret;
+
+ ret = regmap_update_bits(sysc, RZG3S_SYS_PCIE_RST_RSM_B,
+ RZG3S_SYS_PCIE_RST_RSM_B_MASK,
+ FIELD_PREP(RZG3S_SYS_PCIE_RST_RSM_B_MASK, 1));
+ if (ret)
+ return ret;
+
+ ret = rzg3s_pcie_power_resets_deassert(host);
+ if (ret)
+ goto assert_rst_rsm_b;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ goto assert_power_resets;
+
+ ret = rzg3s_pcie_host_setup(host, rzg3s_pcie_msi_hw_setup,
+ rzg3s_pcie_msi_hw_teardown);
+ if (ret)
+ goto rpm_put;
+
+ return 0;
+
+ /*
+ * If any error happens there is no way to recover the IP. Put it in the
+ * lowest possible power state.
+ */
+rpm_put:
+ pm_runtime_put_sync(dev);
+assert_power_resets:
+ reset_control_bulk_assert(data->num_power_resets,
+ host->power_resets);
+assert_rst_rsm_b:
+ regmap_update_bits(sysc, RZG3S_SYS_PCIE_RST_RSM_B,
+ RZG3S_SYS_PCIE_RST_RSM_B_MASK,
+ FIELD_PREP(RZG3S_SYS_PCIE_RST_RSM_B_MASK, 0));
+ return ret;
+}
+
+static const struct dev_pm_ops rzg3s_pcie_pm_ops = {
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(rzg3s_pcie_suspend_noirq,
+ rzg3s_pcie_resume_noirq)
+};
+
+static const char * const rzg3s_soc_power_resets[] = {
+ "aresetn", "rst_cfg_b", "rst_load_b",
+};
+
+static const char * const rzg3s_soc_cfg_resets[] = {
+ "rst_b", "rst_ps_b", "rst_gp_b", "rst_rsm_b",
+};
+
+static const struct rzg3s_pcie_soc_data rzg3s_soc_data = {
+ .power_resets = rzg3s_soc_power_resets,
+ .num_power_resets = ARRAY_SIZE(rzg3s_soc_power_resets),
+ .cfg_resets = rzg3s_soc_cfg_resets,
+ .num_cfg_resets = ARRAY_SIZE(rzg3s_soc_cfg_resets),
+ .init_phy = rzg3s_soc_pcie_init_phy,
+};
+
+static const struct of_device_id rzg3s_pcie_of_match[] = {
+ {
+ .compatible = "renesas,r9a08g045-pcie",
+ .data = &rzg3s_soc_data,
+ },
+ {}
+};
+
+static struct platform_driver rzg3s_pcie_driver = {
+ .driver = {
+ .name = "rzg3s-pcie-host",
+ .of_match_table = rzg3s_pcie_of_match,
+ .pm = pm_ptr(&rzg3s_pcie_pm_ops),
+ .suppress_bind_attrs = true,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+ .probe = rzg3s_pcie_probe,
+};
+builtin_platform_driver(rzg3s_pcie_driver);
diff --git a/drivers/pci/controller/pcie-xilinx-common.h b/drivers/pci/controller/pcie-xilinx-common.h
new file mode 100644
index 000000000000..1832770f3308
--- /dev/null
+++ b/drivers/pci/controller/pcie-xilinx-common.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * (C) Copyright 2023, Xilinx, Inc.
+ */
+
+#include <linux/pci.h>
+#include <linux/pci-ecam.h>
+#include <linux/platform_device.h>
+
+/* Interrupt bit definitions */
+#define XILINX_PCIE_INTR_LINK_DOWN 0
+#define XILINX_PCIE_INTR_HOT_RESET 3
+#define XILINX_PCIE_INTR_CFG_PCIE_TIMEOUT 4
+#define XILINX_PCIE_INTR_CFG_TIMEOUT 8
+#define XILINX_PCIE_INTR_CORRECTABLE 9
+#define XILINX_PCIE_INTR_NONFATAL 10
+#define XILINX_PCIE_INTR_FATAL 11
+#define XILINX_PCIE_INTR_CFG_ERR_POISON 12
+#define XILINX_PCIE_INTR_PME_TO_ACK_RCVD 15
+#define XILINX_PCIE_INTR_INTX 16
+#define XILINX_PCIE_INTR_PM_PME_RCVD 17
+#define XILINX_PCIE_INTR_MSI 17
+#define XILINX_PCIE_INTR_SLV_UNSUPP 20
+#define XILINX_PCIE_INTR_SLV_UNEXP 21
+#define XILINX_PCIE_INTR_SLV_COMPL 22
+#define XILINX_PCIE_INTR_SLV_ERRP 23
+#define XILINX_PCIE_INTR_SLV_CMPABT 24
+#define XILINX_PCIE_INTR_SLV_ILLBUR 25
+#define XILINX_PCIE_INTR_MST_DECERR 26
+#define XILINX_PCIE_INTR_MST_SLVERR 27
+#define XILINX_PCIE_INTR_SLV_PCIE_TIMEOUT 28
diff --git a/drivers/pci/controller/pcie-xilinx-cpm.c b/drivers/pci/controller/pcie-xilinx-cpm.c
index 4a787a941674..d38f27e20761 100644
--- a/drivers/pci/controller/pcie-xilinx-cpm.c
+++ b/drivers/pci/controller/pcie-xilinx-cpm.c
@@ -16,11 +16,9 @@
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
-#include <linux/pci.h>
-#include <linux/platform_device.h>
-#include <linux/pci-ecam.h>
#include "../pci.h"
+#include "pcie-xilinx-common.h"
/* Register definitions */
#define XILINX_CPM_PCIE_REG_IDR 0x00000E10
@@ -32,35 +30,16 @@
#define XILINX_CPM_PCIE_REG_IDRN_MASK 0x00000E3C
#define XILINX_CPM_PCIE_MISC_IR_STATUS 0x00000340
#define XILINX_CPM_PCIE_MISC_IR_ENABLE 0x00000348
-#define XILINX_CPM_PCIE_MISC_IR_LOCAL BIT(1)
-
-#define XILINX_CPM_PCIE_IR_STATUS 0x000002A0
-#define XILINX_CPM_PCIE_IR_ENABLE 0x000002A8
-#define XILINX_CPM_PCIE_IR_LOCAL BIT(0)
-
-/* Interrupt registers definitions */
-#define XILINX_CPM_PCIE_INTR_LINK_DOWN 0
-#define XILINX_CPM_PCIE_INTR_HOT_RESET 3
-#define XILINX_CPM_PCIE_INTR_CFG_PCIE_TIMEOUT 4
-#define XILINX_CPM_PCIE_INTR_CFG_TIMEOUT 8
-#define XILINX_CPM_PCIE_INTR_CORRECTABLE 9
-#define XILINX_CPM_PCIE_INTR_NONFATAL 10
-#define XILINX_CPM_PCIE_INTR_FATAL 11
-#define XILINX_CPM_PCIE_INTR_CFG_ERR_POISON 12
-#define XILINX_CPM_PCIE_INTR_PME_TO_ACK_RCVD 15
-#define XILINX_CPM_PCIE_INTR_INTX 16
-#define XILINX_CPM_PCIE_INTR_PM_PME_RCVD 17
-#define XILINX_CPM_PCIE_INTR_SLV_UNSUPP 20
-#define XILINX_CPM_PCIE_INTR_SLV_UNEXP 21
-#define XILINX_CPM_PCIE_INTR_SLV_COMPL 22
-#define XILINX_CPM_PCIE_INTR_SLV_ERRP 23
-#define XILINX_CPM_PCIE_INTR_SLV_CMPABT 24
-#define XILINX_CPM_PCIE_INTR_SLV_ILLBUR 25
-#define XILINX_CPM_PCIE_INTR_MST_DECERR 26
-#define XILINX_CPM_PCIE_INTR_MST_SLVERR 27
-#define XILINX_CPM_PCIE_INTR_SLV_PCIE_TIMEOUT 28
-
-#define IMR(x) BIT(XILINX_CPM_PCIE_INTR_ ##x)
+#define XILINX_CPM_PCIE0_MISC_IR_LOCAL BIT(1)
+#define XILINX_CPM_PCIE1_MISC_IR_LOCAL BIT(2)
+
+#define XILINX_CPM_PCIE0_IR_STATUS 0x000002A0
+#define XILINX_CPM_PCIE1_IR_STATUS 0x000002B4
+#define XILINX_CPM_PCIE0_IR_ENABLE 0x000002A8
+#define XILINX_CPM_PCIE1_IR_ENABLE 0x000002BC
+#define XILINX_CPM_PCIE_IR_LOCAL BIT(0)
+
+#define IMR(x) BIT(XILINX_PCIE_INTR_ ##x)
#define XILINX_CPM_PCIE_IMR_ALL_MASK \
( \
@@ -104,14 +83,22 @@
enum xilinx_cpm_version {
CPM,
CPM5,
+ CPM5_HOST1,
+ CPM5NC_HOST,
};
/**
* struct xilinx_cpm_variant - CPM variant information
* @version: CPM version
+ * @ir_status: Offset for the error interrupt status register
+ * @ir_enable: Offset for the CPM5 local error interrupt enable register
+ * @ir_misc_value: A bitmask for the miscellaneous interrupt status
*/
struct xilinx_cpm_variant {
enum xilinx_cpm_version version;
+ u32 ir_status;
+ u32 ir_enable;
+ u32 ir_misc_value;
};
/**
@@ -293,6 +280,7 @@ static void xilinx_cpm_pcie_event_flow(struct irq_desc *desc)
{
struct xilinx_cpm_pcie *port = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
+ const struct xilinx_cpm_variant *variant = port->variant;
unsigned long val;
int i;
@@ -303,11 +291,11 @@ static void xilinx_cpm_pcie_event_flow(struct irq_desc *desc)
generic_handle_domain_irq(port->cpm_domain, i);
pcie_write(port, val, XILINX_CPM_PCIE_REG_IDR);
- if (port->variant->version == CPM5) {
- val = readl_relaxed(port->cpm_base + XILINX_CPM_PCIE_IR_STATUS);
+ if (variant->ir_status) {
+ val = readl_relaxed(port->cpm_base + variant->ir_status);
if (val)
writel_relaxed(val, port->cpm_base +
- XILINX_CPM_PCIE_IR_STATUS);
+ variant->ir_status);
}
/*
@@ -323,7 +311,7 @@ static void xilinx_cpm_pcie_event_flow(struct irq_desc *desc)
}
#define _IC(x, s) \
- [XILINX_CPM_PCIE_INTR_ ## x] = { __stringify(x), s }
+ [XILINX_PCIE_INTR_ ## x] = { __stringify(x), s }
static const struct {
const char *sym;
@@ -359,9 +347,9 @@ static irqreturn_t xilinx_cpm_pcie_intr_handler(int irq, void *dev_id)
d = irq_domain_get_irq_data(port->cpm_domain, irq);
switch (d->hwirq) {
- case XILINX_CPM_PCIE_INTR_CORRECTABLE:
- case XILINX_CPM_PCIE_INTR_NONFATAL:
- case XILINX_CPM_PCIE_INTR_FATAL:
+ case XILINX_PCIE_INTR_CORRECTABLE:
+ case XILINX_PCIE_INTR_NONFATAL:
+ case XILINX_PCIE_INTR_FATAL:
cpm_pcie_clear_err_interrupts(port);
fallthrough;
@@ -407,17 +395,15 @@ static int xilinx_cpm_pcie_init_irq_domain(struct xilinx_cpm_pcie *port)
return -EINVAL;
}
- port->cpm_domain = irq_domain_add_linear(pcie_intc_node, 32,
- &event_domain_ops,
- port);
+ port->cpm_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), 32,
+ &event_domain_ops, port);
if (!port->cpm_domain)
goto out;
irq_domain_update_bus_token(port->cpm_domain, DOMAIN_BUS_NEXUS);
- port->intx_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
- &intx_domain_ops,
- port);
+ port->intx_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), PCI_NUM_INTX,
+ &intx_domain_ops, port);
if (!port->intx_domain)
goto out;
@@ -466,7 +452,7 @@ static int xilinx_cpm_setup_irq(struct xilinx_cpm_pcie *port)
}
port->intx_irq = irq_create_mapping(port->cpm_domain,
- XILINX_CPM_PCIE_INTR_INTX);
+ XILINX_PCIE_INTR_INTX);
if (!port->intx_irq) {
dev_err(dev, "Failed to map INTx interrupt\n");
return -ENXIO;
@@ -489,6 +475,11 @@ static int xilinx_cpm_setup_irq(struct xilinx_cpm_pcie *port)
*/
static void xilinx_cpm_pcie_init_port(struct xilinx_cpm_pcie *port)
{
+ const struct xilinx_cpm_variant *variant = port->variant;
+
+ if (variant->version == CPM5NC_HOST)
+ return;
+
if (cpm_pcie_link_up(port))
dev_info(port->dev, "PCIe Link is UP\n");
else
@@ -507,15 +498,15 @@ static void xilinx_cpm_pcie_init_port(struct xilinx_cpm_pcie *port)
* XILINX_CPM_PCIE_MISC_IR_ENABLE register is mapped to
* CPM SLCR block.
*/
- writel(XILINX_CPM_PCIE_MISC_IR_LOCAL,
+ writel(variant->ir_misc_value,
port->cpm_base + XILINX_CPM_PCIE_MISC_IR_ENABLE);
- if (port->variant->version == CPM5) {
+ if (variant->ir_enable) {
writel(XILINX_CPM_PCIE_IR_LOCAL,
- port->cpm_base + XILINX_CPM_PCIE_IR_ENABLE);
+ port->cpm_base + variant->ir_enable);
}
- /* Enable the Bridge enable bit */
+ /* Set Bridge enable bit */
pcie_write(port, pcie_read(port, XILINX_CPM_PCIE_REG_RPSC) |
XILINX_CPM_PCIE_REG_RPSC_BEN,
XILINX_CPM_PCIE_REG_RPSC);
@@ -549,7 +540,8 @@ static int xilinx_cpm_pcie_parse_dt(struct xilinx_cpm_pcie *port,
if (IS_ERR(port->cfg))
return PTR_ERR(port->cfg);
- if (port->variant->version == CPM5) {
+ if (port->variant->version == CPM5 ||
+ port->variant->version == CPM5_HOST1) {
port->reg_base = devm_platform_ioremap_resource_byname(pdev,
"cpm_csr");
if (IS_ERR(port->reg_base))
@@ -589,28 +581,34 @@ static int xilinx_cpm_pcie_probe(struct platform_device *pdev)
port->dev = dev;
- err = xilinx_cpm_pcie_init_irq_domain(port);
- if (err)
- return err;
+ port->variant = of_device_get_match_data(dev);
- bus = resource_list_first_type(&bridge->windows, IORESOURCE_BUS);
- if (!bus)
- return -ENODEV;
+ if (port->variant->version != CPM5NC_HOST) {
+ err = xilinx_cpm_pcie_init_irq_domain(port);
+ if (err)
+ return err;
+ }
- port->variant = of_device_get_match_data(dev);
+ bus = resource_list_first_type(&bridge->windows, IORESOURCE_BUS);
+ if (!bus) {
+ err = -ENODEV;
+ goto err_free_irq_domains;
+ }
err = xilinx_cpm_pcie_parse_dt(port, bus->res);
if (err) {
dev_err(dev, "Parsing DT failed\n");
- goto err_parse_dt;
+ goto err_free_irq_domains;
}
xilinx_cpm_pcie_init_port(port);
- err = xilinx_cpm_setup_irq(port);
- if (err) {
- dev_err(dev, "Failed to set up interrupts\n");
- goto err_setup_irq;
+ if (port->variant->version != CPM5NC_HOST) {
+ err = xilinx_cpm_setup_irq(port);
+ if (err) {
+ dev_err(dev, "Failed to set up interrupts\n");
+ goto err_setup_irq;
+ }
}
bridge->sysdata = port->cfg;
@@ -623,20 +621,37 @@ static int xilinx_cpm_pcie_probe(struct platform_device *pdev)
return 0;
err_host_bridge:
- xilinx_cpm_free_interrupts(port);
+ if (port->variant->version != CPM5NC_HOST)
+ xilinx_cpm_free_interrupts(port);
err_setup_irq:
pci_ecam_free(port->cfg);
-err_parse_dt:
- xilinx_cpm_free_irq_domains(port);
+err_free_irq_domains:
+ if (port->variant->version != CPM5NC_HOST)
+ xilinx_cpm_free_irq_domains(port);
return err;
}
static const struct xilinx_cpm_variant cpm_host = {
.version = CPM,
+ .ir_misc_value = XILINX_CPM_PCIE0_MISC_IR_LOCAL,
};
static const struct xilinx_cpm_variant cpm5_host = {
.version = CPM5,
+ .ir_misc_value = XILINX_CPM_PCIE0_MISC_IR_LOCAL,
+ .ir_status = XILINX_CPM_PCIE0_IR_STATUS,
+ .ir_enable = XILINX_CPM_PCIE0_IR_ENABLE,
+};
+
+static const struct xilinx_cpm_variant cpm5_host1 = {
+ .version = CPM5_HOST1,
+ .ir_misc_value = XILINX_CPM_PCIE1_MISC_IR_LOCAL,
+ .ir_status = XILINX_CPM_PCIE1_IR_STATUS,
+ .ir_enable = XILINX_CPM_PCIE1_IR_ENABLE,
+};
+
+static const struct xilinx_cpm_variant cpm5n_host = {
+ .version = CPM5NC_HOST,
};
static const struct of_device_id xilinx_cpm_pcie_of_match[] = {
@@ -648,6 +663,14 @@ static const struct of_device_id xilinx_cpm_pcie_of_match[] = {
.compatible = "xlnx,versal-cpm5-host",
.data = &cpm5_host,
},
+ {
+ .compatible = "xlnx,versal-cpm5-host1",
+ .data = &cpm5_host1,
+ },
+ {
+ .compatible = "xlnx,versal-cpm5nc-host",
+ .data = &cpm5n_host,
+ },
{}
};
diff --git a/drivers/pci/controller/pcie-xilinx-dma-pl.c b/drivers/pci/controller/pcie-xilinx-dma-pl.c
new file mode 100644
index 000000000000..b037c8f315e4
--- /dev/null
+++ b/drivers/pci/controller/pcie-xilinx-dma-pl.c
@@ -0,0 +1,847 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * PCIe host controller driver for Xilinx XDMA PCIe Bridge
+ *
+ * Copyright (C) 2023 Xilinx, Inc. All rights reserved.
+ */
+#include <linux/bitfield.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+
+#include "../pci.h"
+#include "pcie-xilinx-common.h"
+
+/* Register definitions */
+#define XILINX_PCIE_DMA_REG_IDR 0x00000138
+#define XILINX_PCIE_DMA_REG_IMR 0x0000013c
+#define XILINX_PCIE_DMA_REG_PSCR 0x00000144
+#define XILINX_PCIE_DMA_REG_RPSC 0x00000148
+#define XILINX_PCIE_DMA_REG_MSIBASE1 0x0000014c
+#define XILINX_PCIE_DMA_REG_MSIBASE2 0x00000150
+#define XILINX_PCIE_DMA_REG_RPEFR 0x00000154
+#define XILINX_PCIE_DMA_REG_IDRN 0x00000160
+#define XILINX_PCIE_DMA_REG_IDRN_MASK 0x00000164
+#define XILINX_PCIE_DMA_REG_MSI_LOW 0x00000170
+#define XILINX_PCIE_DMA_REG_MSI_HI 0x00000174
+#define XILINX_PCIE_DMA_REG_MSI_LOW_MASK 0x00000178
+#define XILINX_PCIE_DMA_REG_MSI_HI_MASK 0x0000017c
+
+#define IMR(x) BIT(XILINX_PCIE_INTR_ ##x)
+
+#define XILINX_PCIE_INTR_IMR_ALL_MASK \
+ ( \
+ IMR(LINK_DOWN) | \
+ IMR(HOT_RESET) | \
+ IMR(CFG_TIMEOUT) | \
+ IMR(CORRECTABLE) | \
+ IMR(NONFATAL) | \
+ IMR(FATAL) | \
+ IMR(INTX) | \
+ IMR(MSI) | \
+ IMR(SLV_UNSUPP) | \
+ IMR(SLV_UNEXP) | \
+ IMR(SLV_COMPL) | \
+ IMR(SLV_ERRP) | \
+ IMR(SLV_CMPABT) | \
+ IMR(SLV_ILLBUR) | \
+ IMR(MST_DECERR) | \
+ IMR(MST_SLVERR) \
+ )
+
+#define XILINX_PCIE_DMA_IMR_ALL_MASK 0x0ff30fe9
+#define XILINX_PCIE_DMA_IDR_ALL_MASK 0xffffffff
+#define XILINX_PCIE_DMA_IDRN_MASK GENMASK(19, 16)
+
+/* Root Port Error Register definitions */
+#define XILINX_PCIE_DMA_RPEFR_ERR_VALID BIT(18)
+#define XILINX_PCIE_DMA_RPEFR_REQ_ID GENMASK(15, 0)
+#define XILINX_PCIE_DMA_RPEFR_ALL_MASK 0xffffffff
+
+/* Root Port Interrupt Register definitions */
+#define XILINX_PCIE_DMA_IDRN_SHIFT 16
+
+/* Root Port Status/control Register definitions */
+#define XILINX_PCIE_DMA_REG_RPSC_BEN BIT(0)
+
+/* Phy Status/Control Register definitions */
+#define XILINX_PCIE_DMA_REG_PSCR_LNKUP BIT(11)
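+/* Offset of the bridge registers inside the QDMA "breg" region */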
+#define QDMA_BRIDGE_BASE_OFF 0xcd8
+
+/* Number of MSI IRQs */
+#define XILINX_NUM_MSI_IRQS 64
+
+enum xilinx_pl_dma_version {
+ XDMA,
+ QDMA,
+};
+
+/**
+ * struct xilinx_pl_dma_variant - PL DMA PCIe variant information
+ * @version: DMA version
+ */
+struct xilinx_pl_dma_variant {
+ enum xilinx_pl_dma_version version;
+};
+
+struct xilinx_msi {
+ unsigned long *bitmap;
+ struct irq_domain *dev_domain;
+ struct mutex lock; /* Protect bitmap variable */
+ int irq_msi0;
+ int irq_msi1;
+};
+
+/**
+ * struct pl_dma_pcie - PCIe port information
+ * @dev: Device pointer
+ * @reg_base: IO Mapped Register Base
+ * @cfg_base: IO Mapped Configuration Base
+ * @irq: Interrupt number
+ * @cfg: Holds mappings of config space window
+ * @phys_reg_base: Physical address of reg base
+ * @intx_domain: Legacy IRQ domain pointer
+ * @pldma_domain: PL DMA IRQ domain pointer
+ * @resources: Bus Resources
+ * @msi: MSI information
+ * @intx_irq: INTx error interrupt number
+ * @lock: Lock protecting shared register access
+ * @variant: PL DMA PCIe version check pointer
+ */
+struct pl_dma_pcie {
+ struct device *dev;
+ void __iomem *reg_base;
+ void __iomem *cfg_base;
+ int irq;
+ struct pci_config_window *cfg;
+ phys_addr_t phys_reg_base;
+ struct irq_domain *intx_domain;
+ struct irq_domain *pldma_domain;
+ struct list_head resources;
+ struct xilinx_msi msi;
+ int intx_irq;
+ raw_spinlock_t lock;
+ const struct xilinx_pl_dma_variant *variant;
+};
+
+static inline u32 pcie_read(struct pl_dma_pcie *port, u32 reg)
+{
+ if (port->variant->version == QDMA)
+ return readl(port->reg_base + reg + QDMA_BRIDGE_BASE_OFF);
+
+ return readl(port->reg_base + reg);
+}
+
+static inline void pcie_write(struct pl_dma_pcie *port, u32 val, u32 reg)
+{
+ if (port->variant->version == QDMA)
+ writel(val, port->reg_base + reg + QDMA_BRIDGE_BASE_OFF);
+ else
+ writel(val, port->reg_base + reg);
+}
+
+static inline bool xilinx_pl_dma_pcie_link_up(struct pl_dma_pcie *port)
+{
+ return pcie_read(port, XILINX_PCIE_DMA_REG_PSCR) &
+ XILINX_PCIE_DMA_REG_PSCR_LNKUP;
+}
+
+static void xilinx_pl_dma_pcie_clear_err_interrupts(struct pl_dma_pcie *port)
+{
+ unsigned long val = pcie_read(port, XILINX_PCIE_DMA_REG_RPEFR);
+
+ if (val & XILINX_PCIE_DMA_RPEFR_ERR_VALID) {
+ dev_dbg(port->dev, "Requester ID %lu\n",
+ val & XILINX_PCIE_DMA_RPEFR_REQ_ID);
+ pcie_write(port, XILINX_PCIE_DMA_RPEFR_ALL_MASK,
+ XILINX_PCIE_DMA_REG_RPEFR);
+ }
+}
+
+static bool xilinx_pl_dma_pcie_valid_device(struct pci_bus *bus,
+ unsigned int devfn)
+{
+ struct pl_dma_pcie *port = bus->sysdata;
+
+ if (!pci_is_root_bus(bus)) {
+ /*
+ * Checking whether the link is up is the last line of
+ * defense, and this check is inherently racy by definition.
+ * Sending a PIO request to a downstream device when the link is
+ * down causes an unrecoverable error, and a reset of the entire
+ * PCIe controller will be needed. We can reduce the likelihood
+ * of that unrecoverable error by checking whether the link is
+ * up, but we can't completely prevent it because the link may
+ * go down between the link-up check and the PIO request.
+ */
+ if (!xilinx_pl_dma_pcie_link_up(port))
+ return false;
+ } else if (devfn > 0)
+ /* Only one device down on each root port */
+ return false;
+
+ return true;
+}
+
+static void __iomem *xilinx_pl_dma_pcie_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int where)
+{
+ struct pl_dma_pcie *port = bus->sysdata;
+
+ if (!xilinx_pl_dma_pcie_valid_device(bus, devfn))
+ return NULL;
+
+ if (port->variant->version == QDMA)
+ return port->cfg_base + PCIE_ECAM_OFFSET(bus->number, devfn, where);
+
+ return port->reg_base + PCIE_ECAM_OFFSET(bus->number, devfn, where);
+}
+
+/* PCIe operations */
+static struct pci_ecam_ops xilinx_pl_dma_pcie_ops = {
+ .pci_ops = {
+ .map_bus = xilinx_pl_dma_pcie_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+ }
+};
+
+static void xilinx_pl_dma_pcie_enable_msi(struct pl_dma_pcie *port)
+{
+ phys_addr_t msi_addr = port->phys_reg_base;
+
+ pcie_write(port, upper_32_bits(msi_addr), XILINX_PCIE_DMA_REG_MSIBASE1);
+ pcie_write(port, lower_32_bits(msi_addr), XILINX_PCIE_DMA_REG_MSIBASE2);
+}
+
+static void xilinx_mask_intx_irq(struct irq_data *data)
+{
+ struct pl_dma_pcie *port = irq_data_get_irq_chip_data(data);
+ unsigned long flags;
+ u32 mask, val;
+
+ mask = BIT(data->hwirq + XILINX_PCIE_DMA_IDRN_SHIFT);
+ raw_spin_lock_irqsave(&port->lock, flags);
+ val = pcie_read(port, XILINX_PCIE_DMA_REG_IDRN_MASK);
+ pcie_write(port, (val & (~mask)), XILINX_PCIE_DMA_REG_IDRN_MASK);
+ raw_spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void xilinx_unmask_intx_irq(struct irq_data *data)
+{
+ struct pl_dma_pcie *port = irq_data_get_irq_chip_data(data);
+ unsigned long flags;
+ u32 mask, val;
+
+ mask = BIT(data->hwirq + XILINX_PCIE_DMA_IDRN_SHIFT);
+ raw_spin_lock_irqsave(&port->lock, flags);
+ val = pcie_read(port, XILINX_PCIE_DMA_REG_IDRN_MASK);
+ pcie_write(port, (val | mask), XILINX_PCIE_DMA_REG_IDRN_MASK);
+ raw_spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static struct irq_chip xilinx_leg_irq_chip = {
+ .name = "pl_dma:INTx",
+ .irq_mask = xilinx_mask_intx_irq,
+ .irq_unmask = xilinx_unmask_intx_irq,
+};
+
+static int xilinx_pl_dma_pcie_intx_map(struct irq_domain *domain,
+ unsigned int irq, irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &xilinx_leg_irq_chip, handle_level_irq);
+ irq_set_chip_data(irq, domain->host_data);
+ irq_set_status_flags(irq, IRQ_LEVEL);
+
+ return 0;
+}
+
+/* INTx IRQ Domain operations */
+static const struct irq_domain_ops intx_domain_ops = {
+ .map = xilinx_pl_dma_pcie_intx_map,
+};
+
+static irqreturn_t xilinx_pl_dma_pcie_msi_handler_high(int irq, void *args)
+{
+ struct xilinx_msi *msi;
+ unsigned long status;
+ u32 bit, virq;
+ struct pl_dma_pcie *port = args;
+
+ msi = &port->msi;
+
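+ /*
+ * MSI_HI reports vectors 32-63: translate the register bit into the
+ * hwirq number by adding 32 before looking up the mapping.
+ */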
+ while ((status = pcie_read(port, XILINX_PCIE_DMA_REG_MSI_HI)) != 0) {
+ for_each_set_bit(bit, &status, 32) {
+ pcie_write(port, 1 << bit, XILINX_PCIE_DMA_REG_MSI_HI);
+ bit = bit + 32;
+ virq = irq_find_mapping(msi->dev_domain, bit);
+ if (virq)
+ generic_handle_irq(virq);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t xilinx_pl_dma_pcie_msi_handler_low(int irq, void *args)
+{
+ struct pl_dma_pcie *port = args;
+ struct xilinx_msi *msi;
+ unsigned long status;
+ u32 bit, virq;
+
+ msi = &port->msi;
+
+ while ((status = pcie_read(port, XILINX_PCIE_DMA_REG_MSI_LOW)) != 0) {
+ for_each_set_bit(bit, &status, 32) {
+ pcie_write(port, 1 << bit, XILINX_PCIE_DMA_REG_MSI_LOW);
+ virq = irq_find_mapping(msi->dev_domain, bit);
+ if (virq)
+ generic_handle_irq(virq);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t xilinx_pl_dma_pcie_event_flow(int irq, void *args)
+{
+ struct pl_dma_pcie *port = args;
+ unsigned long val;
+ int i;
+
+ val = pcie_read(port, XILINX_PCIE_DMA_REG_IDR);
+ val &= pcie_read(port, XILINX_PCIE_DMA_REG_IMR);
+ for_each_set_bit(i, &val, 32)
+ generic_handle_domain_irq(port->pldma_domain, i);
+
+ pcie_write(port, val, XILINX_PCIE_DMA_REG_IDR);
+
+ return IRQ_HANDLED;
+}
+
+#define _IC(x, s) \
+ [XILINX_PCIE_INTR_ ## x] = { __stringify(x), s }
+
+static const struct {
+ const char *sym;
+ const char *str;
+} intr_cause[32] = {
+ _IC(LINK_DOWN, "Link Down"),
+ _IC(HOT_RESET, "Hot reset"),
+ _IC(CFG_TIMEOUT, "ECAM access timeout"),
+ _IC(CORRECTABLE, "Correctable error message"),
+ _IC(NONFATAL, "Non fatal error message"),
+ _IC(FATAL, "Fatal error message"),
+ _IC(SLV_UNSUPP, "Slave unsupported request"),
+ _IC(SLV_UNEXP, "Slave unexpected completion"),
+ _IC(SLV_COMPL, "Slave completion timeout"),
+ _IC(SLV_ERRP, "Slave Error Poison"),
+ _IC(SLV_CMPABT, "Slave Completer Abort"),
+ _IC(SLV_ILLBUR, "Slave Illegal Burst"),
+ _IC(MST_DECERR, "Master decode error"),
+ _IC(MST_SLVERR, "Master slave error"),
+};
+
+static irqreturn_t xilinx_pl_dma_pcie_intr_handler(int irq, void *dev_id)
+{
+ struct pl_dma_pcie *port = (struct pl_dma_pcie *)dev_id;
+ struct device *dev = port->dev;
+ struct irq_data *d;
+
+ d = irq_domain_get_irq_data(port->pldma_domain, irq);
+ switch (d->hwirq) {
+ case XILINX_PCIE_INTR_CORRECTABLE:
+ case XILINX_PCIE_INTR_NONFATAL:
+ case XILINX_PCIE_INTR_FATAL:
+ xilinx_pl_dma_pcie_clear_err_interrupts(port);
+ fallthrough;
+
+ default:
+ if (intr_cause[d->hwirq].str)
+ dev_warn(dev, "%s\n", intr_cause[d->hwirq].str);
+ else
+ dev_warn(dev, "Unknown IRQ %ld\n", d->hwirq);
+ }
+
+ return IRQ_HANDLED;
+}
+
+#define XILINX_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_NO_AFFINITY)
+
+#define XILINX_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
+ MSI_FLAG_MULTI_PCI_MSI)
+
+static const struct msi_parent_ops xilinx_msi_parent_ops = {
+ .required_flags = XILINX_MSI_FLAGS_REQUIRED,
+ .supported_flags = XILINX_MSI_FLAGS_SUPPORTED,
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .prefix = "pl_dma-",
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
+};
+
+static void xilinx_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+ struct pl_dma_pcie *pcie = irq_data_get_irq_chip_data(data);
+ phys_addr_t msi_addr = pcie->phys_reg_base;
+
+ msg->address_lo = lower_32_bits(msi_addr);
+ msg->address_hi = upper_32_bits(msi_addr);
+ msg->data = data->hwirq;
+}
+
+static struct irq_chip xilinx_irq_chip = {
+ .name = "pl_dma:MSI",
+ .irq_compose_msi_msg = xilinx_compose_msi_msg,
+};
+
+static int xilinx_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *args)
+{
+ struct pl_dma_pcie *pcie = domain->host_data;
+ struct xilinx_msi *msi = &pcie->msi;
+ int bit, i;
+
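+ /* Reserve a contiguous, naturally aligned block of MSI vectors */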
+ mutex_lock(&msi->lock);
+ bit = bitmap_find_free_region(msi->bitmap, XILINX_NUM_MSI_IRQS,
+ get_count_order(nr_irqs));
+ if (bit < 0) {
+ mutex_unlock(&msi->lock);
+ return -ENOSPC;
+ }
+
+ for (i = 0; i < nr_irqs; i++) {
+ irq_domain_set_info(domain, virq + i, bit + i, &xilinx_irq_chip,
+ domain->host_data, handle_simple_irq,
+ NULL, NULL);
+ }
+ mutex_unlock(&msi->lock);
+
+ return 0;
+}
+
+static void xilinx_irq_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ struct irq_data *data = irq_domain_get_irq_data(domain, virq);
+ struct pl_dma_pcie *pcie = irq_data_get_irq_chip_data(data);
+ struct xilinx_msi *msi = &pcie->msi;
+
+ mutex_lock(&msi->lock);
+ bitmap_release_region(msi->bitmap, data->hwirq,
+ get_count_order(nr_irqs));
+ mutex_unlock(&msi->lock);
+}
+
+static const struct irq_domain_ops dev_msi_domain_ops = {
+ .alloc = xilinx_irq_domain_alloc,
+ .free = xilinx_irq_domain_free,
+};
+
+static void xilinx_pl_dma_pcie_free_irq_domains(struct pl_dma_pcie *port)
+{
+ struct xilinx_msi *msi = &port->msi;
+
+ if (port->intx_domain) {
+ irq_domain_remove(port->intx_domain);
+ port->intx_domain = NULL;
+ }
+
+ if (msi->dev_domain) {
+ irq_domain_remove(msi->dev_domain);
+ msi->dev_domain = NULL;
+ }
+}
+
+static int xilinx_pl_dma_pcie_init_msi_irq_domain(struct pl_dma_pcie *port)
+{
+ struct device *dev = port->dev;
+ struct xilinx_msi *msi = &port->msi;
+ int size = BITS_TO_LONGS(XILINX_NUM_MSI_IRQS) * sizeof(long);
+ struct irq_domain_info info = {
+ .fwnode = dev_fwnode(port->dev),
+ .ops = &dev_msi_domain_ops,
+ .host_data = port,
+ .size = XILINX_NUM_MSI_IRQS,
+ };
+
+ msi->dev_domain = msi_create_parent_irq_domain(&info, &xilinx_msi_parent_ops);
+ if (!msi->dev_domain)
+ goto out;
+
+ mutex_init(&msi->lock);
+ msi->bitmap = kzalloc(size, GFP_KERNEL);
+ if (!msi->bitmap)
+ goto out;
+
+ raw_spin_lock_init(&port->lock);
+ xilinx_pl_dma_pcie_enable_msi(port);
+
+ return 0;
+
+out:
+ xilinx_pl_dma_pcie_free_irq_domains(port);
+ dev_err(dev, "Failed to allocate MSI IRQ domains\n");
+
+ return -ENOMEM;
+}
+
+/*
+ * INTx error interrupts are Xilinx controller-specific interrupts, used to
+ * notify the user about errors such as config timeouts, slave unsupported
+ * requests, and fatal/non-fatal errors.
+ */
+static irqreturn_t xilinx_pl_dma_pcie_intx_flow(int irq, void *args)
+{
+ unsigned long val;
+ int i;
+ struct pl_dma_pcie *port = args;
+
+ val = FIELD_GET(XILINX_PCIE_DMA_IDRN_MASK,
+ pcie_read(port, XILINX_PCIE_DMA_REG_IDRN));
+
+ for_each_set_bit(i, &val, PCI_NUM_INTX)
+ generic_handle_domain_irq(port->intx_domain, i);
+ return IRQ_HANDLED;
+}
+
+static void xilinx_pl_dma_pcie_mask_event_irq(struct irq_data *d)
+{
+ struct pl_dma_pcie *port = irq_data_get_irq_chip_data(d);
+ u32 val;
+
+ raw_spin_lock(&port->lock);
+ val = pcie_read(port, XILINX_PCIE_DMA_REG_IMR);
+ val &= ~BIT(d->hwirq);
+ pcie_write(port, val, XILINX_PCIE_DMA_REG_IMR);
+ raw_spin_unlock(&port->lock);
+}
+
+static void xilinx_pl_dma_pcie_unmask_event_irq(struct irq_data *d)
+{
+ struct pl_dma_pcie *port = irq_data_get_irq_chip_data(d);
+ u32 val;
+
+ raw_spin_lock(&port->lock);
+ val = pcie_read(port, XILINX_PCIE_DMA_REG_IMR);
+ val |= BIT(d->hwirq);
+ pcie_write(port, val, XILINX_PCIE_DMA_REG_IMR);
+ raw_spin_unlock(&port->lock);
+}
+
+static struct irq_chip xilinx_pl_dma_pcie_event_irq_chip = {
+ .name = "pl_dma:RC-Event",
+ .irq_mask = xilinx_pl_dma_pcie_mask_event_irq,
+ .irq_unmask = xilinx_pl_dma_pcie_unmask_event_irq,
+};
+
+static int xilinx_pl_dma_pcie_event_map(struct irq_domain *domain,
+ unsigned int irq, irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &xilinx_pl_dma_pcie_event_irq_chip,
+ handle_level_irq);
+ irq_set_chip_data(irq, domain->host_data);
+ irq_set_status_flags(irq, IRQ_LEVEL);
+
+ return 0;
+}
+
+static const struct irq_domain_ops event_domain_ops = {
+ .map = xilinx_pl_dma_pcie_event_map,
+};
+
+/**
+ * xilinx_pl_dma_pcie_init_irq_domain - Initialize IRQ domain
+ * @port: PCIe port information
+ *
+ * Return: '0' on success and error value on failure.
+ */
+static int xilinx_pl_dma_pcie_init_irq_domain(struct pl_dma_pcie *port)
+{
+ struct device *dev = port->dev;
+ struct device_node *node = dev->of_node;
+ struct device_node *pcie_intc_node;
+ int ret;
+
+ /* Setup INTx */
+ pcie_intc_node = of_get_child_by_name(node, "interrupt-controller");
+ if (!pcie_intc_node) {
+ dev_err(dev, "No PCIe Intc node found\n");
+ return -EINVAL;
+ }
+
+ port->pldma_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), 32,
+ &event_domain_ops, port);
+ if (!port->pldma_domain) {
+ of_node_put(pcie_intc_node);
+ return -ENOMEM;
+ }
+
+ irq_domain_update_bus_token(port->pldma_domain, DOMAIN_BUS_NEXUS);
+
+ port->intx_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), PCI_NUM_INTX,
+ &intx_domain_ops, port);
+ if (!port->intx_domain) {
+ dev_err(dev, "Failed to get an INTx IRQ domain\n");
+ of_node_put(pcie_intc_node);
+ return -ENOMEM;
+ }
+
+ irq_domain_update_bus_token(port->intx_domain, DOMAIN_BUS_WIRED);
+
+ ret = xilinx_pl_dma_pcie_init_msi_irq_domain(port);
+ if (ret) {
+ irq_domain_remove(port->intx_domain);
+ of_node_put(pcie_intc_node);
+ return ret;
+ }
+
+ of_node_put(pcie_intc_node);
+ raw_spin_lock_init(&port->lock);
+
+ return 0;
+}
+
+static int xilinx_pl_dma_pcie_setup_irq(struct pl_dma_pcie *port)
+{
+ struct device *dev = port->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ int i, irq, err;
+
+ port->irq = platform_get_irq(pdev, 0);
+ if (port->irq < 0)
+ return port->irq;
+
+ for (i = 0; i < ARRAY_SIZE(intr_cause); i++) {
+ if (!intr_cause[i].str)
+ continue;
+
+ irq = irq_create_mapping(port->pldma_domain, i);
+ if (!irq) {
+ dev_err(dev, "Failed to map interrupt\n");
+ return -ENXIO;
+ }
+
+ err = devm_request_irq(dev, irq,
+ xilinx_pl_dma_pcie_intr_handler,
+ IRQF_SHARED | IRQF_NO_THREAD,
+ intr_cause[i].sym, port);
+ if (err) {
+ dev_err(dev, "Failed to request IRQ %d\n", irq);
+ return err;
+ }
+ }
+
+ port->intx_irq = irq_create_mapping(port->pldma_domain,
+ XILINX_PCIE_INTR_INTX);
+ if (!port->intx_irq) {
+ dev_err(dev, "Failed to map INTx interrupt\n");
+ return -ENXIO;
+ }
+
+ err = devm_request_irq(dev, port->intx_irq, xilinx_pl_dma_pcie_intx_flow,
+ IRQF_SHARED | IRQF_NO_THREAD, NULL, port);
+ if (err) {
+ dev_err(dev, "Failed to request INTx IRQ %d\n", port->intx_irq);
+ return err;
+ }
+
+ err = devm_request_irq(dev, port->irq, xilinx_pl_dma_pcie_event_flow,
+ IRQF_SHARED | IRQF_NO_THREAD, NULL, port);
+ if (err) {
+ dev_err(dev, "Failed to request event IRQ %d\n", port->irq);
+ return err;
+ }
+
+ return 0;
+}
+
+static void xilinx_pl_dma_pcie_init_port(struct pl_dma_pcie *port)
+{
+ if (xilinx_pl_dma_pcie_link_up(port))
+ dev_info(port->dev, "PCIe Link is UP\n");
+ else
+ dev_info(port->dev, "PCIe Link is DOWN\n");
+
+ /* Disable all interrupts */
+ pcie_write(port, ~XILINX_PCIE_DMA_IDR_ALL_MASK,
+ XILINX_PCIE_DMA_REG_IMR);
+
+ /* Clear pending interrupts */
+ pcie_write(port, pcie_read(port, XILINX_PCIE_DMA_REG_IDR) &
+ XILINX_PCIE_DMA_IMR_ALL_MASK,
+ XILINX_PCIE_DMA_REG_IDR);
+
+ /* Needed for MSI DECODE MODE */
+ pcie_write(port, XILINX_PCIE_DMA_IDR_ALL_MASK,
+ XILINX_PCIE_DMA_REG_MSI_LOW_MASK);
+ pcie_write(port, XILINX_PCIE_DMA_IDR_ALL_MASK,
+ XILINX_PCIE_DMA_REG_MSI_HI_MASK);
+
+ /* Set the Bridge enable bit */
+ pcie_write(port, pcie_read(port, XILINX_PCIE_DMA_REG_RPSC) |
+ XILINX_PCIE_DMA_REG_RPSC_BEN,
+ XILINX_PCIE_DMA_REG_RPSC);
+}
+
+static int xilinx_request_msi_irq(struct pl_dma_pcie *port)
+{
+ struct device *dev = port->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ int ret;
+
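+ /* "msi0" and "msi1" deliver MSI vectors 0-31 and 32-63 respectively */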
+ port->msi.irq_msi0 = platform_get_irq_byname(pdev, "msi0");
+ if (port->msi.irq_msi0 <= 0)
+ return port->msi.irq_msi0;
+
+ ret = devm_request_irq(dev, port->msi.irq_msi0, xilinx_pl_dma_pcie_msi_handler_low,
+ IRQF_SHARED | IRQF_NO_THREAD, "xlnx-pcie-dma-pl",
+ port);
+ if (ret) {
+ dev_err(dev, "Failed to register interrupt\n");
+ return ret;
+ }
+
+ port->msi.irq_msi1 = platform_get_irq_byname(pdev, "msi1");
+ if (port->msi.irq_msi1 <= 0)
+ return port->msi.irq_msi1;
+
+ ret = devm_request_irq(dev, port->msi.irq_msi1, xilinx_pl_dma_pcie_msi_handler_high,
+ IRQF_SHARED | IRQF_NO_THREAD, "xlnx-pcie-dma-pl",
+ port);
+ if (ret) {
+ dev_err(dev, "Failed to register interrupt\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int xilinx_pl_dma_pcie_parse_dt(struct pl_dma_pcie *port,
+ struct resource *bus_range)
+{
+ struct device *dev = port->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct resource *res;
+ int err;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "Missing \"reg\" property\n");
+ return -ENXIO;
+ }
+ port->phys_reg_base = res->start;
+
+ port->cfg = pci_ecam_create(dev, res, bus_range, &xilinx_pl_dma_pcie_ops);
+ if (IS_ERR(port->cfg))
+ return PTR_ERR(port->cfg);
+
+ port->reg_base = port->cfg->win;
+
+ if (port->variant->version == QDMA) {
+ port->cfg_base = port->cfg->win;
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "breg");
+ port->reg_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(port->reg_base))
+ return PTR_ERR(port->reg_base);
+ port->phys_reg_base = res->start;
+ }
+
+ err = xilinx_request_msi_irq(port);
+ if (err) {
+ pci_ecam_free(port->cfg);
+ return err;
+ }
+
+ return 0;
+}
+
+static int xilinx_pl_dma_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct pl_dma_pcie *port;
+ struct pci_host_bridge *bridge;
+ struct resource_entry *bus;
+ int err;
+
+ bridge = devm_pci_alloc_host_bridge(dev, sizeof(*port));
+ if (!bridge)
+ return -ENOMEM;
+
+ port = pci_host_bridge_priv(bridge);
+
+ port->dev = dev;
+
+ bus = resource_list_first_type(&bridge->windows, IORESOURCE_BUS);
+ if (!bus)
+ return -ENODEV;
+
+ port->variant = of_device_get_match_data(dev);
+
+ err = xilinx_pl_dma_pcie_parse_dt(port, bus->res);
+ if (err) {
+ dev_err(dev, "Parsing DT failed\n");
+ return err;
+ }
+
+ xilinx_pl_dma_pcie_init_port(port);
+
+ err = xilinx_pl_dma_pcie_init_irq_domain(port);
+ if (err)
+ goto err_irq_domain;
+
+ err = xilinx_pl_dma_pcie_setup_irq(port);
+ if (err)
+ goto err_host_bridge;
+
+ bridge->sysdata = port;
+ bridge->ops = &xilinx_pl_dma_pcie_ops.pci_ops;
+
+ err = pci_host_probe(bridge);
+ if (err < 0)
+ goto err_host_bridge;
+
+ return 0;
+
+err_host_bridge:
+ xilinx_pl_dma_pcie_free_irq_domains(port);
+
+err_irq_domain:
+ pci_ecam_free(port->cfg);
+ return err;
+}
+
+static const struct xilinx_pl_dma_variant xdma_host = {
+ .version = XDMA,
+};
+
+static const struct xilinx_pl_dma_variant qdma_host = {
+ .version = QDMA,
+};
+
+static const struct of_device_id xilinx_pl_dma_pcie_of_match[] = {
+ {
+ .compatible = "xlnx,xdma-host-3.00",
+ .data = &xdma_host,
+ },
+ {
+ .compatible = "xlnx,qdma-host-3.00",
+ .data = &qdma_host,
+ },
+ {}
+};
+
+static struct platform_driver xilinx_pl_dma_pcie_driver = {
+ .driver = {
+ .name = "xilinx-xdma-pcie",
+ .of_match_table = xilinx_pl_dma_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = xilinx_pl_dma_pcie_probe,
+};
+
+builtin_platform_driver(xilinx_pl_dma_pcie_driver);
diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c
index 176686bdb15c..7db2c96c6cec 100644
--- a/drivers/pci/controller/pcie-xilinx-nwl.c
+++ b/drivers/pci/controller/pcie-xilinx-nwl.c
@@ -10,6 +10,7 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -19,6 +20,7 @@
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/pci-ecam.h>
+#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/irqchip/chained_irq.h>
@@ -80,8 +82,8 @@
#define MSGF_MISC_SR_NON_FATAL_DEV BIT(22)
#define MSGF_MISC_SR_FATAL_DEV BIT(23)
#define MSGF_MISC_SR_LINK_DOWN BIT(24)
-#define MSGF_MSIC_SR_LINK_AUTO_BWIDTH BIT(25)
-#define MSGF_MSIC_SR_LINK_BWIDTH BIT(26)
+#define MSGF_MISC_SR_LINK_AUTO_BWIDTH BIT(25)
+#define MSGF_MISC_SR_LINK_BWIDTH BIT(26)
#define MSGF_MISC_SR_MASKALL (MSGF_MISC_SR_RXMSG_AVAIL | \
MSGF_MISC_SR_RXMSG_OVER | \
@@ -96,8 +98,8 @@
MSGF_MISC_SR_NON_FATAL_DEV | \
MSGF_MISC_SR_FATAL_DEV | \
MSGF_MISC_SR_LINK_DOWN | \
- MSGF_MSIC_SR_LINK_AUTO_BWIDTH | \
- MSGF_MSIC_SR_LINK_BWIDTH)
+ MSGF_MISC_SR_LINK_AUTO_BWIDTH | \
+ MSGF_MISC_SR_LINK_BWIDTH)
/* Legacy interrupt status mask bits */
#define MSGF_LEG_SR_INTA BIT(0)
@@ -126,7 +128,7 @@
#define E_ECAM_CR_ENABLE BIT(0)
#define E_ECAM_SIZE_LOC GENMASK(20, 16)
#define E_ECAM_SIZE_SHIFT 16
-#define NWL_ECAM_VALUE_DEFAULT 12
+#define NWL_ECAM_MAX_SIZE 16
#define CFG_DMA_REG_BAR GENMASK(2, 0)
#define CFG_PCIE_CACHE GENMASK(7, 0)
@@ -144,7 +146,6 @@
#define LINK_WAIT_USLEEP_MAX 100000
struct nwl_msi { /* MSI information */
- struct irq_domain *msi_domain;
DECLARE_BITMAP(bitmap, INT_PCI_MSI_NR);
struct irq_domain *dev_domain;
struct mutex lock; /* protect bitmap variable */
@@ -157,6 +158,7 @@ struct nwl_pcie {
void __iomem *breg_base;
void __iomem *pcireg_base;
void __iomem *ecam_base;
+ struct phy *phy[4];
phys_addr_t phys_breg_base; /* Physical Bridge Register Base */
phys_addr_t phys_pcie_reg_base; /* Physical PCIe Controller Base */
phys_addr_t phys_ecam_base; /* Physical Configuration Base */
@@ -165,10 +167,8 @@ struct nwl_pcie {
u32 ecam_size;
int irq_intx;
int irq_misc;
- u32 ecam_value;
- u8 last_busno;
struct nwl_msi msi;
- struct irq_domain *legacy_irq_domain;
+ struct irq_domain *intx_irq_domain;
struct clk *clk;
raw_spinlock_t leg_mask_lock;
};
@@ -269,42 +269,42 @@ static irqreturn_t nwl_pcie_misc_handler(int irq, void *data)
return IRQ_NONE;
if (misc_stat & MSGF_MISC_SR_RXMSG_OVER)
- dev_err(dev, "Received Message FIFO Overflow\n");
+ dev_err_ratelimited(dev, "Received Message FIFO Overflow\n");
if (misc_stat & MSGF_MISC_SR_SLAVE_ERR)
- dev_err(dev, "Slave error\n");
+ dev_err_ratelimited(dev, "Slave error\n");
if (misc_stat & MSGF_MISC_SR_MASTER_ERR)
- dev_err(dev, "Master error\n");
+ dev_err_ratelimited(dev, "Master error\n");
if (misc_stat & MSGF_MISC_SR_I_ADDR_ERR)
- dev_err(dev, "In Misc Ingress address translation error\n");
+ dev_err_ratelimited(dev, "In Misc Ingress address translation error\n");
if (misc_stat & MSGF_MISC_SR_E_ADDR_ERR)
- dev_err(dev, "In Misc Egress address translation error\n");
+ dev_err_ratelimited(dev, "In Misc Egress address translation error\n");
if (misc_stat & MSGF_MISC_SR_FATAL_AER)
- dev_err(dev, "Fatal Error in AER Capability\n");
+ dev_err_ratelimited(dev, "Fatal Error in AER Capability\n");
if (misc_stat & MSGF_MISC_SR_NON_FATAL_AER)
- dev_err(dev, "Non-Fatal Error in AER Capability\n");
+ dev_err_ratelimited(dev, "Non-Fatal Error in AER Capability\n");
if (misc_stat & MSGF_MISC_SR_CORR_AER)
- dev_err(dev, "Correctable Error in AER Capability\n");
+ dev_err_ratelimited(dev, "Correctable Error in AER Capability\n");
if (misc_stat & MSGF_MISC_SR_UR_DETECT)
- dev_err(dev, "Unsupported request Detected\n");
+ dev_err_ratelimited(dev, "Unsupported request Detected\n");
if (misc_stat & MSGF_MISC_SR_NON_FATAL_DEV)
- dev_err(dev, "Non-Fatal Error Detected\n");
+ dev_err_ratelimited(dev, "Non-Fatal Error Detected\n");
if (misc_stat & MSGF_MISC_SR_FATAL_DEV)
- dev_err(dev, "Fatal Error Detected\n");
+ dev_err_ratelimited(dev, "Fatal Error Detected\n");
- if (misc_stat & MSGF_MSIC_SR_LINK_AUTO_BWIDTH)
+ if (misc_stat & MSGF_MISC_SR_LINK_AUTO_BWIDTH)
dev_info(dev, "Link Autonomous Bandwidth Management Status bit set\n");
- if (misc_stat & MSGF_MSIC_SR_LINK_BWIDTH)
+ if (misc_stat & MSGF_MISC_SR_LINK_BWIDTH)
dev_info(dev, "Link Bandwidth Management Status bit set\n");
/* Clear misc interrupt status */
@@ -326,7 +326,7 @@ static void nwl_pcie_leg_handler(struct irq_desc *desc)
while ((status = nwl_bridge_readl(pcie, MSGF_LEG_STATUS) &
MSGF_LEG_SR_MASKALL) != 0) {
for_each_set_bit(bit, &status, PCI_NUM_INTX)
- generic_handle_domain_irq(pcie->legacy_irq_domain, bit);
+ generic_handle_domain_irq(pcie->intx_irq_domain, bit);
}
chained_irq_exit(chip, desc);
@@ -366,71 +366,74 @@ static void nwl_pcie_msi_handler_low(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
-static void nwl_mask_leg_irq(struct irq_data *data)
+static void nwl_mask_intx_irq(struct irq_data *data)
{
struct nwl_pcie *pcie = irq_data_get_irq_chip_data(data);
unsigned long flags;
u32 mask;
u32 val;
- mask = 1 << (data->hwirq - 1);
+ mask = 1 << data->hwirq;
raw_spin_lock_irqsave(&pcie->leg_mask_lock, flags);
val = nwl_bridge_readl(pcie, MSGF_LEG_MASK);
nwl_bridge_writel(pcie, (val & (~mask)), MSGF_LEG_MASK);
raw_spin_unlock_irqrestore(&pcie->leg_mask_lock, flags);
}
-static void nwl_unmask_leg_irq(struct irq_data *data)
+static void nwl_unmask_intx_irq(struct irq_data *data)
{
struct nwl_pcie *pcie = irq_data_get_irq_chip_data(data);
unsigned long flags;
u32 mask;
u32 val;
- mask = 1 << (data->hwirq - 1);
+ mask = 1 << data->hwirq;
raw_spin_lock_irqsave(&pcie->leg_mask_lock, flags);
val = nwl_bridge_readl(pcie, MSGF_LEG_MASK);
nwl_bridge_writel(pcie, (val | mask), MSGF_LEG_MASK);
raw_spin_unlock_irqrestore(&pcie->leg_mask_lock, flags);
}
-static struct irq_chip nwl_leg_irq_chip = {
+static struct irq_chip nwl_intx_irq_chip = {
.name = "nwl_pcie:legacy",
- .irq_enable = nwl_unmask_leg_irq,
- .irq_disable = nwl_mask_leg_irq,
- .irq_mask = nwl_mask_leg_irq,
- .irq_unmask = nwl_unmask_leg_irq,
+ .irq_enable = nwl_unmask_intx_irq,
+ .irq_disable = nwl_mask_intx_irq,
+ .irq_mask = nwl_mask_intx_irq,
+ .irq_unmask = nwl_unmask_intx_irq,
};
-static int nwl_legacy_map(struct irq_domain *domain, unsigned int irq,
- irq_hw_number_t hwirq)
+static int nwl_intx_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
{
- irq_set_chip_and_handler(irq, &nwl_leg_irq_chip, handle_level_irq);
+ irq_set_chip_and_handler(irq, &nwl_intx_irq_chip, handle_level_irq);
irq_set_chip_data(irq, domain->host_data);
irq_set_status_flags(irq, IRQ_LEVEL);
return 0;
}
-static const struct irq_domain_ops legacy_domain_ops = {
- .map = nwl_legacy_map,
+static const struct irq_domain_ops intx_domain_ops = {
+ .map = nwl_intx_map,
.xlate = pci_irqd_intx_xlate,
};
#ifdef CONFIG_PCI_MSI
-static struct irq_chip nwl_msi_irq_chip = {
- .name = "nwl_pcie:msi",
- .irq_enable = pci_msi_unmask_irq,
- .irq_disable = pci_msi_mask_irq,
- .irq_mask = pci_msi_mask_irq,
- .irq_unmask = pci_msi_unmask_irq,
-};
-static struct msi_domain_info nwl_msi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_MULTI_PCI_MSI),
- .chip = &nwl_msi_irq_chip,
+#define NWL_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_NO_AFFINITY)
+
+#define NWL_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
+ MSI_FLAG_MULTI_PCI_MSI)
+
+static const struct msi_parent_ops nwl_msi_parent_ops = {
+ .required_flags = NWL_MSI_FLAGS_REQUIRED,
+ .supported_flags = NWL_MSI_FLAGS_SUPPORTED,
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .prefix = "nwl-",
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
};
+
#endif
static void nwl_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
@@ -443,16 +446,9 @@ static void nwl_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
msg->data = data->hwirq;
}
-static int nwl_msi_set_affinity(struct irq_data *irq_data,
- const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
static struct irq_chip nwl_irq_chip = {
.name = "Xilinx MSI",
.irq_compose_msi_msg = nwl_compose_msi_msg,
- .irq_set_affinity = nwl_msi_set_affinity,
};
static int nwl_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
@@ -502,45 +498,93 @@ static int nwl_pcie_init_msi_irq_domain(struct nwl_pcie *pcie)
{
#ifdef CONFIG_PCI_MSI
struct device *dev = pcie->dev;
- struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
struct nwl_msi *msi = &pcie->msi;
-
- msi->dev_domain = irq_domain_add_linear(NULL, INT_PCI_MSI_NR,
- &dev_msi_domain_ops, pcie);
+ struct irq_domain_info info = {
+ .fwnode = dev_fwnode(dev),
+ .ops = &dev_msi_domain_ops,
+ .host_data = pcie,
+ .size = INT_PCI_MSI_NR,
+ };
+
+ msi->dev_domain = msi_create_parent_irq_domain(&info, &nwl_msi_parent_ops);
if (!msi->dev_domain) {
dev_err(dev, "failed to create dev IRQ domain\n");
return -ENOMEM;
}
- msi->msi_domain = pci_msi_create_irq_domain(fwnode,
- &nwl_msi_domain_info,
- msi->dev_domain);
- if (!msi->msi_domain) {
- dev_err(dev, "failed to create msi IRQ domain\n");
- irq_domain_remove(msi->dev_domain);
- return -ENOMEM;
- }
#endif
return 0;
}
+static void nwl_pcie_phy_power_off(struct nwl_pcie *pcie, int i)
+{
+ int err = phy_power_off(pcie->phy[i]);
+
+ if (err)
+ dev_err(pcie->dev, "could not power off phy %d (err=%d)\n", i,
+ err);
+}
+
+static void nwl_pcie_phy_exit(struct nwl_pcie *pcie, int i)
+{
+ int err = phy_exit(pcie->phy[i]);
+
+ if (err)
+ dev_err(pcie->dev, "could not exit phy %d (err=%d)\n", i, err);
+}
+
+static int nwl_pcie_phy_enable(struct nwl_pcie *pcie)
+{
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(pcie->phy); i++) {
+ ret = phy_init(pcie->phy[i]);
+ if (ret)
+ goto err;
+
+ ret = phy_power_on(pcie->phy[i]);
+ if (ret) {
+ nwl_pcie_phy_exit(pcie, i);
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+ while (i--) {
+ nwl_pcie_phy_power_off(pcie, i);
+ nwl_pcie_phy_exit(pcie, i);
+ }
+
+ return ret;
+}
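A worked failure trace for the unwind above (indices illustrative):

/*
 * phy_init() has succeeded for PHYs 0-2; phy_power_on(pcie->phy[2])
 * then fails.  nwl_pcie_phy_exit(pcie, 2) undoes the init of PHY 2,
 * after which the while (i--) loop powers off and exits PHYs 1 and 0,
 * in reverse order of bring-up.  If phy_init() itself fails at index i,
 * the code jumps straight to err and only PHYs 0..i-1 are unwound,
 * since PHY i was never initialized.
 */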
+
+static void nwl_pcie_phy_disable(struct nwl_pcie *pcie)
+{
+ int i;
+
+ for (i = ARRAY_SIZE(pcie->phy); i--;) {
+ nwl_pcie_phy_power_off(pcie, i);
+ nwl_pcie_phy_exit(pcie, i);
+ }
+}
+
static int nwl_pcie_init_irq_domain(struct nwl_pcie *pcie)
{
struct device *dev = pcie->dev;
struct device_node *node = dev->of_node;
- struct device_node *legacy_intc_node;
+ struct device_node *intc_node;
- legacy_intc_node = of_get_next_child(node, NULL);
- if (!legacy_intc_node) {
+ intc_node = of_get_next_child(node, NULL);
+ if (!intc_node) {
dev_err(dev, "No legacy intc node found\n");
return -EINVAL;
}
- pcie->legacy_irq_domain = irq_domain_add_linear(legacy_intc_node,
- PCI_NUM_INTX,
- &legacy_domain_ops,
- pcie);
- of_node_put(legacy_intc_node);
- if (!pcie->legacy_irq_domain) {
+ pcie->intx_irq_domain = irq_domain_create_linear(of_fwnode_handle(intc_node), PCI_NUM_INTX,
+ &intx_domain_ops, pcie);
+ of_node_put(intc_node);
+ if (!pcie->intx_irq_domain) {
dev_err(dev, "failed to create IRQ domain\n");
return -ENOMEM;
}
@@ -625,7 +669,7 @@ static int nwl_pcie_bridge_init(struct nwl_pcie *pcie)
{
struct device *dev = pcie->dev;
struct platform_device *pdev = to_platform_device(dev);
- u32 breg_val, ecam_val, first_busno = 0;
+ u32 breg_val, ecam_val;
int err;
breg_val = nwl_bridge_readl(pcie, E_BREG_CAPABILITIES) & BREG_PRESENT;
@@ -674,24 +718,16 @@ static int nwl_pcie_bridge_init(struct nwl_pcie *pcie)
nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, E_ECAM_CONTROL) |
E_ECAM_CR_ENABLE, E_ECAM_CONTROL);
- nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, E_ECAM_CONTROL) |
- (pcie->ecam_value << E_ECAM_SIZE_SHIFT),
- E_ECAM_CONTROL);
+ ecam_val = nwl_bridge_readl(pcie, E_ECAM_CONTROL);
+ ecam_val &= ~E_ECAM_SIZE_LOC;
+ ecam_val |= NWL_ECAM_MAX_SIZE << E_ECAM_SIZE_SHIFT;
+ nwl_bridge_writel(pcie, ecam_val, E_ECAM_CONTROL);
nwl_bridge_writel(pcie, lower_32_bits(pcie->phys_ecam_base),
E_ECAM_BASE_LO);
nwl_bridge_writel(pcie, upper_32_bits(pcie->phys_ecam_base),
E_ECAM_BASE_HI);
- /* Get bus range */
- ecam_val = nwl_bridge_readl(pcie, E_ECAM_CONTROL);
- pcie->last_busno = (ecam_val & E_ECAM_SIZE_LOC) >> E_ECAM_SIZE_SHIFT;
- /* Write primary, secondary and subordinate bus numbers */
- ecam_val = first_busno;
- ecam_val |= (first_busno + 1) << 8;
- ecam_val |= (pcie->last_busno << E_ECAM_SIZE_SHIFT);
- writel(ecam_val, (pcie->ecam_base + PCI_PRIMARY_BUS));
-
if (nwl_pcie_link_up(pcie))
dev_info(dev, "Link is UP\n");
else
@@ -721,14 +757,14 @@ static int nwl_pcie_bridge_init(struct nwl_pcie *pcie)
/* Enable all misc interrupts */
nwl_bridge_writel(pcie, MSGF_MISC_SR_MASKALL, MSGF_MISC_MASK);
- /* Disable all legacy interrupts */
+ /* Disable all INTX interrupts */
nwl_bridge_writel(pcie, (u32)~MSGF_LEG_SR_MASKALL, MSGF_LEG_MASK);
- /* Clear pending legacy interrupts */
+ /* Clear pending INTX interrupts */
nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, MSGF_LEG_STATUS) &
MSGF_LEG_SR_MASKALL, MSGF_LEG_STATUS);
- /* Enable all legacy interrupts */
+ /* Enable all INTX interrupts */
nwl_bridge_writel(pcie, MSGF_LEG_SR_MASKALL, MSGF_LEG_MASK);
/* Enable the bridge config interrupt */
@@ -743,6 +779,7 @@ static int nwl_pcie_parse_dt(struct nwl_pcie *pcie,
{
struct device *dev = pcie->dev;
struct resource *res;
+ int i;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "breg");
pcie->breg_base = devm_ioremap_resource(dev, res);
@@ -770,6 +807,18 @@ static int nwl_pcie_parse_dt(struct nwl_pcie *pcie,
irq_set_chained_handler_and_data(pcie->irq_intx,
nwl_pcie_leg_handler, pcie);
+
+ for (i = 0; i < ARRAY_SIZE(pcie->phy); i++) {
+ pcie->phy[i] = devm_of_phy_get_by_index(dev, dev->of_node, i);
+ if (PTR_ERR(pcie->phy[i]) == -ENODEV) {
+ pcie->phy[i] = NULL;
+ break;
+ }
+
+ if (IS_ERR(pcie->phy[i]))
+ return PTR_ERR(pcie->phy[i]);
+ }
+
return 0;
}
@@ -790,9 +839,9 @@ static int nwl_pcie_probe(struct platform_device *pdev)
return -ENODEV;
pcie = pci_host_bridge_priv(bridge);
+ platform_set_drvdata(pdev, pcie);
pcie->dev = dev;
- pcie->ecam_value = NWL_ECAM_VALUE_DEFAULT;
err = nwl_pcie_parse_dt(pcie, pdev);
if (err) {
@@ -810,16 +859,22 @@ static int nwl_pcie_probe(struct platform_device *pdev)
return err;
}
+ err = nwl_pcie_phy_enable(pcie);
+ if (err) {
+ dev_err(dev, "could not enable PHYs\n");
+ goto err_clk;
+ }
+
err = nwl_pcie_bridge_init(pcie);
if (err) {
dev_err(dev, "HW Initialization failed\n");
- return err;
+ goto err_phy;
}
err = nwl_pcie_init_irq_domain(pcie);
if (err) {
dev_err(dev, "Failed creating IRQ Domain\n");
- return err;
+ goto err_phy;
}
bridge->sysdata = pcie;
@@ -829,11 +884,27 @@ static int nwl_pcie_probe(struct platform_device *pdev)
err = nwl_pcie_enable_msi(pcie);
if (err < 0) {
dev_err(dev, "failed to enable MSI support: %d\n", err);
- return err;
+ goto err_phy;
}
}
- return pci_host_probe(bridge);
+ err = pci_host_probe(bridge);
+ if (!err)
+ return 0;
+
+err_phy:
+ nwl_pcie_phy_disable(pcie);
+err_clk:
+ clk_disable_unprepare(pcie->clk);
+ return err;
+}
+
+static void nwl_pcie_remove(struct platform_device *pdev)
+{
+ struct nwl_pcie *pcie = platform_get_drvdata(pdev);
+
+ nwl_pcie_phy_disable(pcie);
+ clk_disable_unprepare(pcie->clk);
}
static struct platform_driver nwl_pcie_driver = {
@@ -843,5 +914,6 @@ static struct platform_driver nwl_pcie_driver = {
.of_match_table = nwl_pcie_of_match,
},
.probe = nwl_pcie_probe,
+ .remove = nwl_pcie_remove,
};
builtin_platform_driver(nwl_pcie_driver);
diff --git a/drivers/pci/controller/pcie-xilinx.c b/drivers/pci/controller/pcie-xilinx.c
index cb6e9f7b0152..937ea6ae1ac4 100644
--- a/drivers/pci/controller/pcie-xilinx.c
+++ b/drivers/pci/controller/pcie-xilinx.c
@@ -12,6 +12,7 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -203,16 +204,6 @@ static void xilinx_msi_top_irq_ack(struct irq_data *d)
*/
}
-static struct irq_chip xilinx_msi_top_chip = {
- .name = "PCIe MSI",
- .irq_ack = xilinx_msi_top_irq_ack,
-};
-
-static int xilinx_msi_set_affinity(struct irq_data *d, const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
static void xilinx_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
struct xilinx_pcie *pcie = irq_data_get_irq_chip_data(data);
@@ -225,7 +216,6 @@ static void xilinx_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
static struct irq_chip xilinx_msi_bottom_chip = {
.name = "Xilinx MSI",
- .irq_set_affinity = xilinx_msi_set_affinity,
.irq_compose_msi_msg = xilinx_compose_msi_msg,
};
@@ -270,28 +260,42 @@ static const struct irq_domain_ops xilinx_msi_domain_ops = {
.free = xilinx_msi_domain_free,
};
-static struct msi_domain_info xilinx_msi_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
- .chip = &xilinx_msi_top_chip,
+static bool xilinx_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
+ struct irq_domain *real_parent, struct msi_domain_info *info)
+{
+ struct irq_chip *chip = info->chip;
+
+ if (!msi_lib_init_dev_msi_info(dev, domain, real_parent, info))
+ return false;
+
+ chip->irq_ack = xilinx_msi_top_irq_ack;
+ return true;
+}
+
+#define XILINX_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_NO_AFFINITY)
+
+static const struct msi_parent_ops xilinx_msi_parent_ops = {
+ .required_flags = XILINX_MSI_FLAGS_REQUIRED,
+ .supported_flags = MSI_GENERIC_FLAGS_MASK,
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .prefix = "xilinx-",
+ .init_dev_msi_info = xilinx_init_dev_msi_info,
};
static int xilinx_allocate_msi_domains(struct xilinx_pcie *pcie)
{
- struct fwnode_handle *fwnode = dev_fwnode(pcie->dev);
- struct irq_domain *parent;
-
- parent = irq_domain_create_linear(fwnode, XILINX_NUM_MSI_IRQS,
- &xilinx_msi_domain_ops, pcie);
- if (!parent) {
- dev_err(pcie->dev, "failed to create IRQ domain\n");
- return -ENOMEM;
- }
- irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS);
-
- pcie->msi_domain = pci_msi_create_irq_domain(fwnode, &xilinx_msi_info, parent);
+ struct irq_domain_info info = {
+ .fwnode = dev_fwnode(pcie->dev),
+ .ops = &xilinx_msi_domain_ops,
+ .host_data = pcie,
+ .size = XILINX_NUM_MSI_IRQS,
+ };
+
+ pcie->msi_domain = msi_create_parent_irq_domain(&info, &xilinx_msi_parent_ops);
if (!pcie->msi_domain) {
dev_err(pcie->dev, "failed to create MSI domain\n");
- irq_domain_remove(parent);
return -ENOMEM;
}
@@ -300,10 +304,7 @@ static int xilinx_allocate_msi_domains(struct xilinx_pcie *pcie)
static void xilinx_free_msi_domains(struct xilinx_pcie *pcie)
{
- struct irq_domain *parent = pcie->msi_domain->parent;
-
irq_domain_remove(pcie->msi_domain);
- irq_domain_remove(parent);
}
/* INTx Functions */
@@ -399,7 +400,7 @@ static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
if (val & XILINX_PCIE_RPIFR1_MSI_INTR) {
val = pcie_read(pcie, XILINX_PCIE_REG_RPIFR2) &
XILINX_PCIE_RPIFR2_MSG_DATA;
- domain = pcie->msi_domain->parent;
+ domain = pcie->msi_domain;
} else {
val = (val & XILINX_PCIE_RPIFR1_INTR_MASK) >>
XILINX_PCIE_RPIFR1_INTR_SHIFT;
@@ -466,9 +467,8 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie *pcie)
return -ENODEV;
}
- pcie->leg_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
- &intx_domain_ops,
- pcie);
+ pcie->leg_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), PCI_NUM_INTX,
+ &intx_domain_ops, pcie);
of_node_put(pcie_intc_node);
if (!pcie->leg_domain) {
dev_err(dev, "Failed to get a INTx IRQ domain\n");
diff --git a/drivers/pci/controller/plda/Kconfig b/drivers/pci/controller/plda/Kconfig
new file mode 100644
index 000000000000..62120101139c
--- /dev/null
+++ b/drivers/pci/controller/plda/Kconfig
@@ -0,0 +1,31 @@
+# SPDX-License-Identifier: GPL-2.0
+
+menu "PLDA-based PCIe controllers"
+ depends on PCI
+
+config PCIE_PLDA_HOST
+ bool
+ select IRQ_MSI_LIB
+
+config PCIE_MICROCHIP_HOST
+ tristate "Microchip AXI PCIe controller"
+ depends on PCI_MSI && OF
+ select PCI_HOST_COMMON
+ select PCIE_PLDA_HOST
+ help
+ Say Y here if you want the kernel to support the Microchip AXI PCIe
+ Host Bridge driver.
+
+config PCIE_STARFIVE_HOST
+ tristate "StarFive PCIe host controller"
+ depends on PCI_MSI && OF
+ depends on ARCH_STARFIVE || COMPILE_TEST
+ select PCIE_PLDA_HOST
+ help
+ Say Y here if you want to support the StarFive PCIe controller in
+ host mode. The StarFive PCIe controller uses the PLDA PCIe core.
+
+ If you choose to build this driver as a module, it will be dynamically
+ linked and the module will be called pcie-starfive.ko.
+
+endmenu
diff --git a/drivers/pci/controller/plda/Makefile b/drivers/pci/controller/plda/Makefile
new file mode 100644
index 000000000000..0ac6851bed48
--- /dev/null
+++ b/drivers/pci/controller/plda/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_PCIE_PLDA_HOST) += pcie-plda-host.o
+obj-$(CONFIG_PCIE_MICROCHIP_HOST) += pcie-microchip-host.o
+obj-$(CONFIG_PCIE_STARFIVE_HOST) += pcie-starfive.o
diff --git a/drivers/pci/controller/plda/pcie-microchip-host.c b/drivers/pci/controller/plda/pcie-microchip-host.c
new file mode 100644
index 000000000000..24bbf93b8051
--- /dev/null
+++ b/drivers/pci/controller/plda/pcie-microchip-host.c
@@ -0,0 +1,834 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Microchip AXI PCIe Bridge host controller driver
+ *
+ * Copyright (c) 2018 - 2020 Microchip Corporation. All rights reserved.
+ *
+ * Author: Daire McNamara <daire.mcnamara@microchip.com>
+ */
+
+#include <linux/align.h>
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/log2.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/pci-ecam.h>
+#include <linux/platform_device.h>
+#include <linux/wordpart.h>
+
+#include "../../pci.h"
+#include "../pci-host-common.h"
+#include "pcie-plda.h"
+
+#define MC_MAX_NUM_INBOUND_WINDOWS 8
+#define MPFS_NC_BOUNCE_ADDR 0x80000000
+
+/* PCIe Bridge Phy and Controller Phy offsets */
+#define MC_PCIE1_BRIDGE_ADDR 0x00008000u
+#define MC_PCIE1_CTRL_ADDR 0x0000a000u
+
+/* PCIe Controller Phy Regs */
+#define SEC_ERROR_EVENT_CNT 0x20
+#define DED_ERROR_EVENT_CNT 0x24
+#define SEC_ERROR_INT 0x28
+#define SEC_ERROR_INT_TX_RAM_SEC_ERR_INT GENMASK(3, 0)
+#define SEC_ERROR_INT_RX_RAM_SEC_ERR_INT GENMASK(7, 4)
+#define SEC_ERROR_INT_PCIE2AXI_RAM_SEC_ERR_INT GENMASK(11, 8)
+#define SEC_ERROR_INT_AXI2PCIE_RAM_SEC_ERR_INT GENMASK(15, 12)
+#define SEC_ERROR_INT_ALL_RAM_SEC_ERR_INT GENMASK(15, 0)
+#define NUM_SEC_ERROR_INTS (4)
+#define SEC_ERROR_INT_MASK 0x2c
+#define DED_ERROR_INT 0x30
+#define DED_ERROR_INT_TX_RAM_DED_ERR_INT GENMASK(3, 0)
+#define DED_ERROR_INT_RX_RAM_DED_ERR_INT GENMASK(7, 4)
+#define DED_ERROR_INT_PCIE2AXI_RAM_DED_ERR_INT GENMASK(11, 8)
+#define DED_ERROR_INT_AXI2PCIE_RAM_DED_ERR_INT GENMASK(15, 12)
+#define DED_ERROR_INT_ALL_RAM_DED_ERR_INT GENMASK(15, 0)
+#define NUM_DED_ERROR_INTS (4)
+#define DED_ERROR_INT_MASK 0x34
+#define ECC_CONTROL 0x38
+#define ECC_CONTROL_TX_RAM_INJ_ERROR_0 BIT(0)
+#define ECC_CONTROL_TX_RAM_INJ_ERROR_1 BIT(1)
+#define ECC_CONTROL_TX_RAM_INJ_ERROR_2 BIT(2)
+#define ECC_CONTROL_TX_RAM_INJ_ERROR_3 BIT(3)
+#define ECC_CONTROL_RX_RAM_INJ_ERROR_0 BIT(4)
+#define ECC_CONTROL_RX_RAM_INJ_ERROR_1 BIT(5)
+#define ECC_CONTROL_RX_RAM_INJ_ERROR_2 BIT(6)
+#define ECC_CONTROL_RX_RAM_INJ_ERROR_3 BIT(7)
+#define ECC_CONTROL_PCIE2AXI_RAM_INJ_ERROR_0 BIT(8)
+#define ECC_CONTROL_PCIE2AXI_RAM_INJ_ERROR_1 BIT(9)
+#define ECC_CONTROL_PCIE2AXI_RAM_INJ_ERROR_2 BIT(10)
+#define ECC_CONTROL_PCIE2AXI_RAM_INJ_ERROR_3 BIT(11)
+#define ECC_CONTROL_AXI2PCIE_RAM_INJ_ERROR_0 BIT(12)
+#define ECC_CONTROL_AXI2PCIE_RAM_INJ_ERROR_1 BIT(13)
+#define ECC_CONTROL_AXI2PCIE_RAM_INJ_ERROR_2 BIT(14)
+#define ECC_CONTROL_AXI2PCIE_RAM_INJ_ERROR_3 BIT(15)
+#define ECC_CONTROL_TX_RAM_ECC_BYPASS BIT(24)
+#define ECC_CONTROL_RX_RAM_ECC_BYPASS BIT(25)
+#define ECC_CONTROL_PCIE2AXI_RAM_ECC_BYPASS BIT(26)
+#define ECC_CONTROL_AXI2PCIE_RAM_ECC_BYPASS BIT(27)
+#define PCIE_EVENT_INT 0x14c
+#define PCIE_EVENT_INT_L2_EXIT_INT BIT(0)
+#define PCIE_EVENT_INT_HOTRST_EXIT_INT BIT(1)
+#define PCIE_EVENT_INT_DLUP_EXIT_INT BIT(2)
+#define PCIE_EVENT_INT_MASK GENMASK(2, 0)
+#define PCIE_EVENT_INT_L2_EXIT_INT_MASK BIT(16)
+#define PCIE_EVENT_INT_HOTRST_EXIT_INT_MASK BIT(17)
+#define PCIE_EVENT_INT_DLUP_EXIT_INT_MASK BIT(18)
+#define PCIE_EVENT_INT_ENB_MASK GENMASK(18, 16)
+#define PCIE_EVENT_INT_ENB_SHIFT 16
+#define NUM_PCIE_EVENTS (3)
+
+/* PCIe Config space MSI capability structure */
+#define MC_MSI_CAP_CTRL_OFFSET 0xe0u
+
+/* Events */
+#define EVENT_PCIE_L2_EXIT 0
+#define EVENT_PCIE_HOTRST_EXIT 1
+#define EVENT_PCIE_DLUP_EXIT 2
+#define EVENT_SEC_TX_RAM_SEC_ERR 3
+#define EVENT_SEC_RX_RAM_SEC_ERR 4
+#define EVENT_SEC_PCIE2AXI_RAM_SEC_ERR 5
+#define EVENT_SEC_AXI2PCIE_RAM_SEC_ERR 6
+#define EVENT_DED_TX_RAM_DED_ERR 7
+#define EVENT_DED_RX_RAM_DED_ERR 8
+#define EVENT_DED_PCIE2AXI_RAM_DED_ERR 9
+#define EVENT_DED_AXI2PCIE_RAM_DED_ERR 10
+#define EVENT_LOCAL_DMA_END_ENGINE_0 11
+#define EVENT_LOCAL_DMA_END_ENGINE_1 12
+#define EVENT_LOCAL_DMA_ERROR_ENGINE_0 13
+#define EVENT_LOCAL_DMA_ERROR_ENGINE_1 14
+#define NUM_MC_EVENTS 15
+#define EVENT_LOCAL_A_ATR_EVT_POST_ERR (NUM_MC_EVENTS + PLDA_AXI_POST_ERR)
+#define EVENT_LOCAL_A_ATR_EVT_FETCH_ERR (NUM_MC_EVENTS + PLDA_AXI_FETCH_ERR)
+#define EVENT_LOCAL_A_ATR_EVT_DISCARD_ERR (NUM_MC_EVENTS + PLDA_AXI_DISCARD_ERR)
+#define EVENT_LOCAL_A_ATR_EVT_DOORBELL (NUM_MC_EVENTS + PLDA_AXI_DOORBELL)
+#define EVENT_LOCAL_P_ATR_EVT_POST_ERR (NUM_MC_EVENTS + PLDA_PCIE_POST_ERR)
+#define EVENT_LOCAL_P_ATR_EVT_FETCH_ERR (NUM_MC_EVENTS + PLDA_PCIE_FETCH_ERR)
+#define EVENT_LOCAL_P_ATR_EVT_DISCARD_ERR (NUM_MC_EVENTS + PLDA_PCIE_DISCARD_ERR)
+#define EVENT_LOCAL_P_ATR_EVT_DOORBELL (NUM_MC_EVENTS + PLDA_PCIE_DOORBELL)
+#define EVENT_LOCAL_PM_MSI_INT_INTX (NUM_MC_EVENTS + PLDA_INTX)
+#define EVENT_LOCAL_PM_MSI_INT_MSI (NUM_MC_EVENTS + PLDA_MSI)
+#define EVENT_LOCAL_PM_MSI_INT_AER_EVT (NUM_MC_EVENTS + PLDA_AER_EVENT)
+#define EVENT_LOCAL_PM_MSI_INT_EVENTS (NUM_MC_EVENTS + PLDA_MISC_EVENTS)
+#define EVENT_LOCAL_PM_MSI_INT_SYS_ERR (NUM_MC_EVENTS + PLDA_SYS_ERR)
+#define NUM_EVENTS (NUM_MC_EVENTS + PLDA_INT_EVENT_NUM)
+
+#define PCIE_EVENT_CAUSE(x, s) \
+ [EVENT_PCIE_ ## x] = { __stringify(x), s }
+
+#define SEC_ERROR_CAUSE(x, s) \
+ [EVENT_SEC_ ## x] = { __stringify(x), s }
+
+#define DED_ERROR_CAUSE(x, s) \
+ [EVENT_DED_ ## x] = { __stringify(x), s }
+
+#define LOCAL_EVENT_CAUSE(x, s) \
+ [EVENT_LOCAL_ ## x] = { __stringify(x), s }
+
+#define PCIE_EVENT(x) \
+ .offset = PCIE_EVENT_INT, \
+ .mask_offset = PCIE_EVENT_INT, \
+ .mask_high = 1, \
+ .mask = PCIE_EVENT_INT_ ## x ## _INT, \
+ .enb_mask = PCIE_EVENT_INT_ENB_MASK
+
+#define SEC_EVENT(x) \
+ .offset = SEC_ERROR_INT, \
+ .mask_offset = SEC_ERROR_INT_MASK, \
+ .mask = SEC_ERROR_INT_ ## x ## _INT, \
+ .mask_high = 1, \
+ .enb_mask = 0
+
+#define DED_EVENT(x) \
+ .offset = DED_ERROR_INT, \
+ .mask_offset = DED_ERROR_INT_MASK, \
+ .mask_high = 1, \
+ .mask = DED_ERROR_INT_ ## x ## _INT, \
+ .enb_mask = 0
+
+#define LOCAL_EVENT(x) \
+ .offset = ISTATUS_LOCAL, \
+ .mask_offset = IMASK_LOCAL, \
+ .mask_high = 0, \
+ .mask = x ## _MASK, \
+ .enb_mask = 0
+
+#define PCIE_EVENT_TO_EVENT_MAP(x) \
+ { PCIE_EVENT_INT_ ## x ## _INT, EVENT_PCIE_ ## x }
+
+#define SEC_ERROR_TO_EVENT_MAP(x) \
+ { SEC_ERROR_INT_ ## x ## _INT, EVENT_SEC_ ## x }
+
+#define DED_ERROR_TO_EVENT_MAP(x) \
+ { DED_ERROR_INT_ ## x ## _INT, EVENT_DED_ ## x }
+
+#define LOCAL_STATUS_TO_EVENT_MAP(x) \
+ { x ## _MASK, EVENT_LOCAL_ ## x }
+
+struct event_map {
+ u32 reg_mask;
+ u32 event_bit;
+};
+
+struct mc_pcie {
+ struct plda_pcie_rp plda;
+ void __iomem *bridge_base_addr;
+ void __iomem *ctrl_base_addr;
+};
+
+struct cause {
+ const char *sym;
+ const char *str;
+};
+
+static const struct cause event_cause[NUM_EVENTS] = {
+ PCIE_EVENT_CAUSE(L2_EXIT, "L2 exit event"),
+ PCIE_EVENT_CAUSE(HOTRST_EXIT, "Hot reset exit event"),
+ PCIE_EVENT_CAUSE(DLUP_EXIT, "DLUP exit event"),
+ SEC_ERROR_CAUSE(TX_RAM_SEC_ERR, "sec error in tx buffer"),
+ SEC_ERROR_CAUSE(RX_RAM_SEC_ERR, "sec error in rx buffer"),
+ SEC_ERROR_CAUSE(PCIE2AXI_RAM_SEC_ERR, "sec error in pcie2axi buffer"),
+ SEC_ERROR_CAUSE(AXI2PCIE_RAM_SEC_ERR, "sec error in axi2pcie buffer"),
+ DED_ERROR_CAUSE(TX_RAM_DED_ERR, "ded error in tx buffer"),
+ DED_ERROR_CAUSE(RX_RAM_DED_ERR, "ded error in rx buffer"),
+ DED_ERROR_CAUSE(PCIE2AXI_RAM_DED_ERR, "ded error in pcie2axi buffer"),
+ DED_ERROR_CAUSE(AXI2PCIE_RAM_DED_ERR, "ded error in axi2pcie buffer"),
+ LOCAL_EVENT_CAUSE(DMA_ERROR_ENGINE_0, "dma engine 0 error"),
+ LOCAL_EVENT_CAUSE(DMA_ERROR_ENGINE_1, "dma engine 1 error"),
+ LOCAL_EVENT_CAUSE(A_ATR_EVT_POST_ERR, "axi write request error"),
+ LOCAL_EVENT_CAUSE(A_ATR_EVT_FETCH_ERR, "axi read request error"),
+ LOCAL_EVENT_CAUSE(A_ATR_EVT_DISCARD_ERR, "axi read timeout"),
+ LOCAL_EVENT_CAUSE(P_ATR_EVT_POST_ERR, "pcie write request error"),
+ LOCAL_EVENT_CAUSE(P_ATR_EVT_FETCH_ERR, "pcie read request error"),
+ LOCAL_EVENT_CAUSE(P_ATR_EVT_DISCARD_ERR, "pcie read timeout"),
+ LOCAL_EVENT_CAUSE(PM_MSI_INT_AER_EVT, "aer event"),
+ LOCAL_EVENT_CAUSE(PM_MSI_INT_EVENTS, "pm/ltr/hotplug event"),
+ LOCAL_EVENT_CAUSE(PM_MSI_INT_SYS_ERR, "system error"),
+};
+
+static struct event_map pcie_event_to_event[] = {
+ PCIE_EVENT_TO_EVENT_MAP(L2_EXIT),
+ PCIE_EVENT_TO_EVENT_MAP(HOTRST_EXIT),
+ PCIE_EVENT_TO_EVENT_MAP(DLUP_EXIT),
+};
+
+static struct event_map sec_error_to_event[] = {
+ SEC_ERROR_TO_EVENT_MAP(TX_RAM_SEC_ERR),
+ SEC_ERROR_TO_EVENT_MAP(RX_RAM_SEC_ERR),
+ SEC_ERROR_TO_EVENT_MAP(PCIE2AXI_RAM_SEC_ERR),
+ SEC_ERROR_TO_EVENT_MAP(AXI2PCIE_RAM_SEC_ERR),
+};
+
+static struct event_map ded_error_to_event[] = {
+ DED_ERROR_TO_EVENT_MAP(TX_RAM_DED_ERR),
+ DED_ERROR_TO_EVENT_MAP(RX_RAM_DED_ERR),
+ DED_ERROR_TO_EVENT_MAP(PCIE2AXI_RAM_DED_ERR),
+ DED_ERROR_TO_EVENT_MAP(AXI2PCIE_RAM_DED_ERR),
+};
+
+static struct event_map local_status_to_event[] = {
+ LOCAL_STATUS_TO_EVENT_MAP(DMA_END_ENGINE_0),
+ LOCAL_STATUS_TO_EVENT_MAP(DMA_END_ENGINE_1),
+ LOCAL_STATUS_TO_EVENT_MAP(DMA_ERROR_ENGINE_0),
+ LOCAL_STATUS_TO_EVENT_MAP(DMA_ERROR_ENGINE_1),
+ LOCAL_STATUS_TO_EVENT_MAP(A_ATR_EVT_POST_ERR),
+ LOCAL_STATUS_TO_EVENT_MAP(A_ATR_EVT_FETCH_ERR),
+ LOCAL_STATUS_TO_EVENT_MAP(A_ATR_EVT_DISCARD_ERR),
+ LOCAL_STATUS_TO_EVENT_MAP(A_ATR_EVT_DOORBELL),
+ LOCAL_STATUS_TO_EVENT_MAP(P_ATR_EVT_POST_ERR),
+ LOCAL_STATUS_TO_EVENT_MAP(P_ATR_EVT_FETCH_ERR),
+ LOCAL_STATUS_TO_EVENT_MAP(P_ATR_EVT_DISCARD_ERR),
+ LOCAL_STATUS_TO_EVENT_MAP(P_ATR_EVT_DOORBELL),
+ LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_INTX),
+ LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_MSI),
+ LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_AER_EVT),
+ LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_EVENTS),
+ LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_SYS_ERR),
+};
+
+static struct {
+ u32 offset;
+ u32 mask;
+ u32 shift;
+ u32 enb_mask;
+ u32 mask_high;
+ u32 mask_offset;
+} event_descs[] = {
+ { PCIE_EVENT(L2_EXIT) },
+ { PCIE_EVENT(HOTRST_EXIT) },
+ { PCIE_EVENT(DLUP_EXIT) },
+ { SEC_EVENT(TX_RAM_SEC_ERR) },
+ { SEC_EVENT(RX_RAM_SEC_ERR) },
+ { SEC_EVENT(PCIE2AXI_RAM_SEC_ERR) },
+ { SEC_EVENT(AXI2PCIE_RAM_SEC_ERR) },
+ { DED_EVENT(TX_RAM_DED_ERR) },
+ { DED_EVENT(RX_RAM_DED_ERR) },
+ { DED_EVENT(PCIE2AXI_RAM_DED_ERR) },
+ { DED_EVENT(AXI2PCIE_RAM_DED_ERR) },
+ { LOCAL_EVENT(DMA_END_ENGINE_0) },
+ { LOCAL_EVENT(DMA_END_ENGINE_1) },
+ { LOCAL_EVENT(DMA_ERROR_ENGINE_0) },
+ { LOCAL_EVENT(DMA_ERROR_ENGINE_1) },
+ { LOCAL_EVENT(A_ATR_EVT_POST_ERR) },
+ { LOCAL_EVENT(A_ATR_EVT_FETCH_ERR) },
+ { LOCAL_EVENT(A_ATR_EVT_DISCARD_ERR) },
+ { LOCAL_EVENT(A_ATR_EVT_DOORBELL) },
+ { LOCAL_EVENT(P_ATR_EVT_POST_ERR) },
+ { LOCAL_EVENT(P_ATR_EVT_FETCH_ERR) },
+ { LOCAL_EVENT(P_ATR_EVT_DISCARD_ERR) },
+ { LOCAL_EVENT(P_ATR_EVT_DOORBELL) },
+ { LOCAL_EVENT(PM_MSI_INT_INTX) },
+ { LOCAL_EVENT(PM_MSI_INT_MSI) },
+ { LOCAL_EVENT(PM_MSI_INT_AER_EVT) },
+ { LOCAL_EVENT(PM_MSI_INT_EVENTS) },
+ { LOCAL_EVENT(PM_MSI_INT_SYS_ERR) },
+};
+
+static char poss_clks[][5] = { "fic0", "fic1", "fic2", "fic3" };
+
+static struct mc_pcie *port;
+
+static void mc_pcie_enable_msi(struct mc_pcie *port, void __iomem *ecam)
+{
+ struct plda_msi *msi = &port->plda.msi;
+ u16 reg;
+ u8 queue_size;
+
+ /* Fixup MSI enable flag */
+ reg = readw_relaxed(ecam + MC_MSI_CAP_CTRL_OFFSET + PCI_MSI_FLAGS);
+ reg |= PCI_MSI_FLAGS_ENABLE;
+ writew_relaxed(reg, ecam + MC_MSI_CAP_CTRL_OFFSET + PCI_MSI_FLAGS);
+
+ /* Fixup PCI MSI queue flags */
+ queue_size = FIELD_GET(PCI_MSI_FLAGS_QMASK, reg);
+ reg |= FIELD_PREP(PCI_MSI_FLAGS_QSIZE, queue_size);
+ writew_relaxed(reg, ecam + MC_MSI_CAP_CTRL_OFFSET + PCI_MSI_FLAGS);
+
+ /* Fixup MSI addr fields */
+ writel_relaxed(lower_32_bits(msi->vector_phy),
+ ecam + MC_MSI_CAP_CTRL_OFFSET + PCI_MSI_ADDRESS_LO);
+ writel_relaxed(upper_32_bits(msi->vector_phy),
+ ecam + MC_MSI_CAP_CTRL_OFFSET + PCI_MSI_ADDRESS_HI);
+}
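The queue-size fixup above copies the read-only Multiple Message Capable field (QMASK) into Multiple Message Enable (QSIZE), so every vector the FPGA bitstream advertises becomes usable. A worked example with a hypothetical Message Control value:

	u16 reg = 0x000a;	/* hypothetical: QMASK field (bits 3:1) = 5 */
	u8 queue_size = FIELD_GET(PCI_MSI_FLAGS_QMASK, reg);	/* 5, i.e. 2^5 = 32 vectors */

	reg |= FIELD_PREP(PCI_MSI_FLAGS_QSIZE, queue_size);	/* reg is now 0x005a */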
+
+static inline u32 reg_to_event(u32 reg, struct event_map field)
+{
+ return (reg & field.reg_mask) ? BIT(field.event_bit) : 0;
+}
+
+static u32 pcie_events(struct mc_pcie *port)
+{
+ u32 reg = readl_relaxed(port->ctrl_base_addr + PCIE_EVENT_INT);
+ u32 val = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pcie_event_to_event); i++)
+ val |= reg_to_event(reg, pcie_event_to_event[i]);
+
+ return val;
+}
+
+static u32 sec_errors(struct mc_pcie *port)
+{
+ u32 reg = readl_relaxed(port->ctrl_base_addr + SEC_ERROR_INT);
+ u32 val = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sec_error_to_event); i++)
+ val |= reg_to_event(reg, sec_error_to_event[i]);
+
+ return val;
+}
+
+static u32 ded_errors(struct mc_pcie *port)
+{
+ u32 reg = readl_relaxed(port->ctrl_base_addr + DED_ERROR_INT);
+ u32 val = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ded_error_to_event); i++)
+ val |= reg_to_event(reg, ded_error_to_event[i]);
+
+ return val;
+}
+
+static u32 local_events(struct mc_pcie *port)
+{
+ u32 reg = readl_relaxed(port->bridge_base_addr + ISTATUS_LOCAL);
+ u32 val = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(local_status_to_event); i++)
+ val |= reg_to_event(reg, local_status_to_event[i]);
+
+ return val;
+}
+
+static u32 mc_get_events(struct plda_pcie_rp *port)
+{
+ struct mc_pcie *mc_port = container_of(port, struct mc_pcie, plda);
+ u32 events = 0;
+
+ events |= pcie_events(mc_port);
+ events |= sec_errors(mc_port);
+ events |= ded_errors(mc_port);
+ events |= local_events(mc_port);
+
+ return events;
+}
+
+static irqreturn_t mc_event_handler(int irq, void *dev_id)
+{
+ struct plda_pcie_rp *port = dev_id;
+ struct device *dev = port->dev;
+ struct irq_data *data;
+
+ data = irq_domain_get_irq_data(port->event_domain, irq);
+
+ if (event_cause[data->hwirq].str)
+ dev_err_ratelimited(dev, "%s\n", event_cause[data->hwirq].str);
+ else
+ dev_err_ratelimited(dev, "bad event IRQ %ld\n", data->hwirq);
+
+ return IRQ_HANDLED;
+}
+
+static void mc_ack_event_irq(struct irq_data *data)
+{
+ struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
+ struct mc_pcie *mc_port = container_of(port, struct mc_pcie, plda);
+ u32 event = data->hwirq;
+ void __iomem *addr;
+ u32 mask;
+
+ if (event_descs[event].offset == ISTATUS_LOCAL)
+ addr = mc_port->bridge_base_addr;
+ else
+ addr = mc_port->ctrl_base_addr;
+
+ addr += event_descs[event].offset;
+ mask = event_descs[event].mask;
+ mask |= event_descs[event].enb_mask;
+
+ writel_relaxed(mask, addr);
+}
+
+static void mc_mask_event_irq(struct irq_data *data)
+{
+ struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
+ struct mc_pcie *mc_port = container_of(port, struct mc_pcie, plda);
+ u32 event = data->hwirq;
+ void __iomem *addr;
+ u32 mask;
+ u32 val;
+
+ if (event_descs[event].offset == ISTATUS_LOCAL)
+ addr = mc_port->bridge_base_addr;
+ else
+ addr = mc_port->ctrl_base_addr;
+
+ addr += event_descs[event].mask_offset;
+ mask = event_descs[event].mask;
+ if (event_descs[event].enb_mask) {
+ mask <<= PCIE_EVENT_INT_ENB_SHIFT;
+ mask &= PCIE_EVENT_INT_ENB_MASK;
+ }
+
+ if (!event_descs[event].mask_high)
+ mask = ~mask;
+
+ raw_spin_lock(&port->lock);
+ val = readl_relaxed(addr);
+ if (event_descs[event].mask_high)
+ val |= mask;
+ else
+ val &= mask;
+
+ writel_relaxed(val, addr);
+ raw_spin_unlock(&port->lock);
+}
+
+static void mc_unmask_event_irq(struct irq_data *data)
+{
+ struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
+ struct mc_pcie *mc_port = container_of(port, struct mc_pcie, plda);
+ u32 event = data->hwirq;
+ void __iomem *addr;
+ u32 mask;
+ u32 val;
+
+ if (event_descs[event].offset == ISTATUS_LOCAL)
+ addr = mc_port->bridge_base_addr;
+ else
+ addr = mc_port->ctrl_base_addr;
+
+ addr += event_descs[event].mask_offset;
+ mask = event_descs[event].mask;
+
+ if (event_descs[event].enb_mask)
+ mask <<= PCIE_EVENT_INT_ENB_SHIFT;
+
+ if (event_descs[event].mask_high)
+ mask = ~mask;
+
+ if (event_descs[event].enb_mask)
+ mask &= PCIE_EVENT_INT_ENB_MASK;
+
+ raw_spin_lock(&port->lock);
+ val = readl_relaxed(addr);
+ if (event_descs[event].mask_high)
+ val &= mask;
+ else
+ val |= mask;
+ writel_relaxed(val, addr);
+ raw_spin_unlock(&port->lock);
+}
+
+static struct irq_chip mc_event_irq_chip = {
+ .name = "Microchip PCIe EVENT",
+ .irq_ack = mc_ack_event_irq,
+ .irq_mask = mc_mask_event_irq,
+ .irq_unmask = mc_unmask_event_irq,
+};
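The mask_high/enb_mask handling in the two callbacks above is dense; a worked pass through mc_mask_event_irq() for one event of each flavor, using the register fields defined earlier in this file:

/*
 * EVENT_PCIE_L2_EXIT: mask = PCIE_EVENT_INT_L2_EXIT_INT = BIT(0); the
 * enb_mask path shifts it to BIT(16) (== PCIE_EVENT_INT_L2_EXIT_INT_MASK)
 * and, because mask_high is set, ORs it into PCIE_EVENT_INT, which
 * disables the event.
 *
 * EVENT_LOCAL_DMA_ERROR_ENGINE_0: enb_mask is 0 and mask_high is 0, so
 * the mask is inverted and ANDed in, clearing the event's enable bit in
 * IMASK_LOCAL - the usual active-high interrupt-enable case.
 */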
+
+static inline void mc_pcie_deinit_clk(void *data)
+{
+ struct clk *clk = data;
+
+ clk_disable_unprepare(clk);
+}
+
+static inline struct clk *mc_pcie_init_clk(struct device *dev, const char *id)
+{
+ struct clk *clk;
+ int ret;
+
+ clk = devm_clk_get_optional(dev, id);
+ if (IS_ERR(clk))
+ return clk;
+ if (!clk)
+ return clk;
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ERR_PTR(ret);
+
+ devm_add_action_or_reset(dev, mc_pcie_deinit_clk, clk);
+
+ return clk;
+}
+
+static int mc_pcie_init_clks(struct device *dev)
+{
+ int i;
+ struct clk *fic;
+
+ /*
+ * PCIe may be clocked via the Fabric Interface using between 1 and 4
+ * clocks. Scan the DT for clocks and enable them if present.
+ */
+ for (i = 0; i < ARRAY_SIZE(poss_clks); i++) {
+ fic = mc_pcie_init_clk(dev, poss_clks[i]);
+ if (IS_ERR(fic))
+ return PTR_ERR(fic);
+ }
+
+ return 0;
+}
+
+static int mc_request_event_irq(struct plda_pcie_rp *plda, int event_irq,
+ int event)
+{
+ return devm_request_irq(plda->dev, event_irq, mc_event_handler,
+ 0, event_cause[event].sym, plda);
+}
+
+static const struct plda_event_ops mc_event_ops = {
+ .get_events = mc_get_events,
+};
+
+static const struct plda_event mc_event = {
+ .request_event_irq = mc_request_event_irq,
+ .intx_event = EVENT_LOCAL_PM_MSI_INT_INTX,
+ .msi_event = EVENT_LOCAL_PM_MSI_INT_MSI,
+};
+
+static inline void mc_clear_secs(struct mc_pcie *port)
+{
+ writel_relaxed(SEC_ERROR_INT_ALL_RAM_SEC_ERR_INT,
+ port->ctrl_base_addr + SEC_ERROR_INT);
+ writel_relaxed(0, port->ctrl_base_addr + SEC_ERROR_EVENT_CNT);
+}
+
+static inline void mc_clear_deds(struct mc_pcie *port)
+{
+ writel_relaxed(DED_ERROR_INT_ALL_RAM_DED_ERR_INT,
+ port->ctrl_base_addr + DED_ERROR_INT);
+ writel_relaxed(0, port->ctrl_base_addr + DED_ERROR_EVENT_CNT);
+}
+
+static void mc_disable_interrupts(struct mc_pcie *port)
+{
+ u32 val;
+
+ /* Ensure ECC bypass is enabled */
+ val = ECC_CONTROL_TX_RAM_ECC_BYPASS |
+ ECC_CONTROL_RX_RAM_ECC_BYPASS |
+ ECC_CONTROL_PCIE2AXI_RAM_ECC_BYPASS |
+ ECC_CONTROL_AXI2PCIE_RAM_ECC_BYPASS;
+ writel_relaxed(val, port->ctrl_base_addr + ECC_CONTROL);
+
+ /* Disable SEC errors and clear any outstanding */
+ writel_relaxed(SEC_ERROR_INT_ALL_RAM_SEC_ERR_INT,
+ port->ctrl_base_addr + SEC_ERROR_INT_MASK);
+ mc_clear_secs(port);
+
+ /* Disable DED errors and clear any outstanding */
+ writel_relaxed(DED_ERROR_INT_ALL_RAM_DED_ERR_INT,
+ port->ctrl_base_addr + DED_ERROR_INT_MASK);
+ mc_clear_deds(port);
+
+ /* Disable local interrupts and clear any outstanding */
+ writel_relaxed(0, port->bridge_base_addr + IMASK_LOCAL);
+ writel_relaxed(GENMASK(31, 0), port->bridge_base_addr + ISTATUS_LOCAL);
+ writel_relaxed(GENMASK(31, 0), port->bridge_base_addr + ISTATUS_MSI);
+
+ /* Disable PCIe events and clear any outstanding */
+ val = PCIE_EVENT_INT_L2_EXIT_INT |
+ PCIE_EVENT_INT_HOTRST_EXIT_INT |
+ PCIE_EVENT_INT_DLUP_EXIT_INT |
+ PCIE_EVENT_INT_L2_EXIT_INT_MASK |
+ PCIE_EVENT_INT_HOTRST_EXIT_INT_MASK |
+ PCIE_EVENT_INT_DLUP_EXIT_INT_MASK;
+ writel_relaxed(val, port->ctrl_base_addr + PCIE_EVENT_INT);
+
+ /* Disable host interrupts and clear any outstanding */
+ writel_relaxed(0, port->bridge_base_addr + IMASK_HOST);
+ writel_relaxed(GENMASK(31, 0), port->bridge_base_addr + ISTATUS_HOST);
+}
+
+static void mc_pcie_setup_inbound_atr(struct mc_pcie *port, int window_index,
+ u64 axi_addr, u64 pcie_addr, u64 size)
+{
+ u32 table_offset = window_index * ATR_ENTRY_SIZE;
+ void __iomem *table_addr = port->bridge_base_addr + table_offset;
+ u32 atr_sz;
+ u32 val;
+
+ atr_sz = ilog2(size) - 1;
+
+ val = ALIGN_DOWN(lower_32_bits(pcie_addr), SZ_4K);
+ val |= FIELD_PREP(ATR_SIZE_MASK, atr_sz);
+ val |= ATR_IMPL_ENABLE;
+
+ writel(val, table_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM);
+
+ writel(upper_32_bits(pcie_addr), table_addr + ATR0_PCIE_WIN0_SRC_ADDR);
+
+ writel(lower_32_bits(axi_addr), table_addr + ATR0_PCIE_WIN0_TRSL_ADDR_LSB);
+ writel(upper_32_bits(axi_addr), table_addr + ATR0_PCIE_WIN0_TRSL_ADDR_UDW);
+
+ writel(TRSL_ID_AXI4_MASTER_0, table_addr + ATR0_PCIE_WIN0_TRSL_PARAM);
+}
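The size field is a log2 encoding; a quick worked example. (The 2^(atr_sz + 1) span formula is how this PLDA ATR field is conventionally described and is an assumption here, not something stated in this file.)

	u64 size = SZ_1G;
	u32 atr_sz = ilog2(size) - 1;	/* 30 - 1 = 29 */

	/*
	 * The window then spans 1ULL << (atr_sz + 1) == SZ_1G bytes.  Bits
	 * 6:0 of SRCADDR_PARAM hold ATR_SIZE_MASK and ATR_IMPL_ENABLE, which
	 * is why the source address is ALIGN_DOWN()ed to SZ_4K first.
	 */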
+
+static int mc_pcie_setup_inbound_ranges(struct platform_device *pdev,
+ struct mc_pcie *port)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *dn = dev->of_node;
+ struct of_range_parser parser;
+ struct of_range range;
+ int atr_index = 0;
+
+ /*
+ * MPFS PCIe Root Port is 32-bit only, behind a Fabric Interface
+ * Controller FPGA logic block which contains the AXI-S interface.
+ *
+ * From the point of view of the PCIe Root Port, there are only two
+ * supported Root Port configurations:
+ *
+ * Configuration 1: for use with fully coherent designs; supports a
+ * window from 0x0 (CPU space) to specified PCIe space.
+ *
+ * Configuration 2: for use with non-coherent designs; supports two
+ * 1 GB windows to CPU space; one mapping CPU space 0 to PCIe space
+ * 0x80000000 and a second mapping CPU space 0x40000000 to PCIe
+ * space 0xc0000000. This configuration needs two windows because of
+ * how the MSI space is allocated in the AXI-S range on MPFS.
+ *
+ * The FIC interface outside the PCIe block *must* complete the
+ * inbound address translation as per MCHP MPFS FPGA design
+ * guidelines.
+ */
+ if (device_property_read_bool(dev, "dma-noncoherent")) {
+ /*
+ * Always need same two tables in this case. Need two tables
+ * due to hardware interactions between address and size.
+ */
+ mc_pcie_setup_inbound_atr(port, 0, 0,
+ MPFS_NC_BOUNCE_ADDR, SZ_1G);
+ mc_pcie_setup_inbound_atr(port, 1, SZ_1G,
+ MPFS_NC_BOUNCE_ADDR + SZ_1G, SZ_1G);
+ } else {
+ /* Find any DMA ranges */
+ if (of_pci_dma_range_parser_init(&parser, dn)) {
+ /* No DMA range property - setup default */
+ mc_pcie_setup_inbound_atr(port, 0, 0, 0, SZ_4G);
+ return 0;
+ }
+
+ for_each_of_range(&parser, &range) {
+ if (atr_index >= MC_MAX_NUM_INBOUND_WINDOWS) {
+ dev_err(dev, "too many inbound ranges; %d available tables\n",
+ MC_MAX_NUM_INBOUND_WINDOWS);
+ return -EINVAL;
+ }
+ mc_pcie_setup_inbound_atr(port, atr_index, 0,
+ range.pci_addr, range.size);
+ atr_index++;
+ }
+ }
+
+ return 0;
+}
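So, on a coherent design, each parsed dma-ranges entry becomes one translation table. For a hypothetical single entry with pci_addr 0x80000000 and a 1 GiB size, the loop reduces to:

	mc_pcie_setup_inbound_atr(port, 0, 0, 0x80000000, SZ_1G);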
+
+static int mc_platform_init(struct pci_config_window *cfg)
+{
+ struct device *dev = cfg->parent;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct pci_host_bridge *bridge = platform_get_drvdata(pdev);
+ int ret;
+
+ /* Configure address translation table 0 for PCIe config space */
+ plda_pcie_setup_window(port->bridge_base_addr, 0, cfg->res.start,
+ cfg->res.start,
+ resource_size(&cfg->res));
+
+ /* Need some fixups in config space */
+ mc_pcie_enable_msi(port, cfg->win);
+
+ /* Configure non-config space outbound ranges */
+ ret = plda_pcie_setup_iomems(bridge, &port->plda);
+ if (ret)
+ return ret;
+
+ ret = mc_pcie_setup_inbound_ranges(pdev, port);
+ if (ret)
+ return ret;
+
+ port->plda.event_ops = &mc_event_ops;
+ port->plda.event_irq_chip = &mc_event_irq_chip;
+ port->plda.events_bitmap = GENMASK(NUM_EVENTS - 1, 0);
+
+ /* Address translation is up; safe to enable interrupts */
+ ret = plda_init_interrupts(pdev, &port->plda, &mc_event);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int mc_host_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ void __iomem *apb_base_addr;
+ struct plda_pcie_rp *plda;
+ int ret;
+ u32 val;
+
+ port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ plda = &port->plda;
+ plda->dev = dev;
+
+ port->bridge_base_addr = devm_platform_ioremap_resource_byname(pdev,
+ "bridge");
+ port->ctrl_base_addr = devm_platform_ioremap_resource_byname(pdev,
+ "ctrl");
+ if (!IS_ERR(port->bridge_base_addr) && !IS_ERR(port->ctrl_base_addr))
+ goto addrs_set;
+
+ /*
+ * The original, incorrect, binding that lumped the control and
+ * bridge addresses together still needs to be handled by the driver.
+ */
+ apb_base_addr = devm_platform_ioremap_resource_byname(pdev, "apb");
+ if (IS_ERR(apb_base_addr))
+ return dev_err_probe(dev, PTR_ERR(apb_base_addr),
+ "both legacy apb register and ctrl/bridge regions missing");
+
+ port->bridge_base_addr = apb_base_addr + MC_PCIE1_BRIDGE_ADDR;
+ port->ctrl_base_addr = apb_base_addr + MC_PCIE1_CTRL_ADDR;
+
+addrs_set:
+ mc_disable_interrupts(port);
+
+ plda->bridge_addr = port->bridge_base_addr;
+ plda->num_events = NUM_EVENTS;
+
+ /* Allow enabling MSI by disabling MSI-X */
+ val = readl(port->bridge_base_addr + PCIE_PCI_IRQ_DW0);
+ val &= ~MSIX_CAP_MASK;
+ writel(val, port->bridge_base_addr + PCIE_PCI_IRQ_DW0);
+
+ /* Pick num vectors from bitfile programmed onto FPGA fabric */
+ val = readl(port->bridge_base_addr + PCIE_PCI_IRQ_DW0);
+ val &= NUM_MSI_MSGS_MASK;
+ val >>= NUM_MSI_MSGS_SHIFT;
+
+ plda->msi.num_vectors = 1 << val;
+
+ /* Pick vector address from design */
+ plda->msi.vector_phy = readl_relaxed(port->bridge_base_addr + IMSI_ADDR);
+
+ ret = mc_pcie_init_clks(dev);
+ if (ret) {
+ dev_err(dev, "failed to get clock resources, error %d\n", ret);
+ return -ENODEV;
+ }
+
+ return pci_host_common_probe(pdev);
+}
+
+static const struct pci_ecam_ops mc_ecam_ops = {
+ .init = mc_platform_init,
+ .pci_ops = {
+ .map_bus = pci_ecam_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+ }
+};
+
+static const struct of_device_id mc_pcie_of_match[] = {
+ {
+ .compatible = "microchip,pcie-host-1.0",
+ .data = &mc_ecam_ops,
+ },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, mc_pcie_of_match);
+
+static struct platform_driver mc_pcie_driver = {
+ .probe = mc_host_probe,
+ .driver = {
+ .name = "microchip-pcie",
+ .of_match_table = mc_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+};
+
+builtin_platform_driver(mc_pcie_driver);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Microchip PCIe host controller driver");
+MODULE_AUTHOR("Daire McNamara <daire.mcnamara@microchip.com>");
diff --git a/drivers/pci/controller/plda/pcie-plda-host.c b/drivers/pci/controller/plda/pcie-plda-host.c
new file mode 100644
index 000000000000..3c2f68383010
--- /dev/null
+++ b/drivers/pci/controller/plda/pcie-plda-host.c
@@ -0,0 +1,651 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PLDA PCIe XpressRich host controller driver
+ *
+ * Copyright (C) 2023 Microchip Co. Ltd
+ * StarFive Co. Ltd
+ *
+ * Author: Daire McNamara <daire.mcnamara@microchip.com>
+ */
+
+#include <linux/align.h>
+#include <linux/bitfield.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
+#include <linux/irqdomain.h>
+#include <linux/msi.h>
+#include <linux/pci_regs.h>
+#include <linux/pci-ecam.h>
+#include <linux/wordpart.h>
+
+#include "pcie-plda.h"
+
+void __iomem *plda_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
+ int where)
+{
+ struct plda_pcie_rp *pcie = bus->sysdata;
+
+ return pcie->config_base + PCIE_ECAM_OFFSET(bus->number, devfn, where);
+}
+EXPORT_SYMBOL_GPL(plda_pcie_map_bus);
+
+static void plda_handle_msi(struct irq_desc *desc)
+{
+ struct plda_pcie_rp *port = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct device *dev = port->dev;
+ struct plda_msi *msi = &port->msi;
+ void __iomem *bridge_base_addr = port->bridge_addr;
+ unsigned long status;
+ u32 bit;
+ int ret;
+
+ chained_irq_enter(chip, desc);
+
+ status = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL);
+ if (status & PM_MSI_INT_MSI_MASK) {
+ writel_relaxed(status & PM_MSI_INT_MSI_MASK,
+ bridge_base_addr + ISTATUS_LOCAL);
+ status = readl_relaxed(bridge_base_addr + ISTATUS_MSI);
+ for_each_set_bit(bit, &status, msi->num_vectors) {
+ ret = generic_handle_domain_irq(msi->dev_domain, bit);
+ if (ret)
+ dev_err_ratelimited(dev, "bad MSI IRQ %d\n",
+ bit);
+ }
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static void plda_msi_bottom_irq_ack(struct irq_data *data)
+{
+ struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
+ void __iomem *bridge_base_addr = port->bridge_addr;
+ u32 bitpos = data->hwirq;
+
+ writel_relaxed(BIT(bitpos), bridge_base_addr + ISTATUS_MSI);
+}
+
+static void plda_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+ struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
+ phys_addr_t addr = port->msi.vector_phy;
+
+ msg->address_lo = lower_32_bits(addr);
+ msg->address_hi = upper_32_bits(addr);
+ msg->data = data->hwirq;
+
+ dev_dbg(port->dev, "msi#%x address_hi %#x address_lo %#x\n",
+ (int)data->hwirq, msg->address_hi, msg->address_lo);
+}
+
+static struct irq_chip plda_msi_bottom_irq_chip = {
+ .name = "PLDA MSI",
+ .irq_ack = plda_msi_bottom_irq_ack,
+ .irq_compose_msi_msg = plda_compose_msi_msg,
+};
+
+static int plda_irq_msi_domain_alloc(struct irq_domain *domain,
+ unsigned int virq,
+ unsigned int nr_irqs,
+ void *args)
+{
+ struct plda_pcie_rp *port = domain->host_data;
+ struct plda_msi *msi = &port->msi;
+ unsigned long bit;
+
+ mutex_lock(&msi->lock);
+ bit = find_first_zero_bit(msi->used, msi->num_vectors);
+ if (bit >= msi->num_vectors) {
+ mutex_unlock(&msi->lock);
+ return -ENOSPC;
+ }
+
+ set_bit(bit, msi->used);
+
+ irq_domain_set_info(domain, virq, bit, &plda_msi_bottom_irq_chip,
+ domain->host_data, handle_edge_irq, NULL, NULL);
+
+ mutex_unlock(&msi->lock);
+
+ return 0;
+}
+
+static void plda_irq_msi_domain_free(struct irq_domain *domain,
+ unsigned int virq,
+ unsigned int nr_irqs)
+{
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ struct plda_pcie_rp *port = irq_data_get_irq_chip_data(d);
+ struct plda_msi *msi = &port->msi;
+
+ mutex_lock(&msi->lock);
+
+ if (test_bit(d->hwirq, msi->used))
+ __clear_bit(d->hwirq, msi->used);
+ else
+ dev_err(port->dev, "trying to free unused MSI%lu\n", d->hwirq);
+
+ mutex_unlock(&msi->lock);
+}
+
+static const struct irq_domain_ops msi_domain_ops = {
+ .alloc = plda_irq_msi_domain_alloc,
+ .free = plda_irq_msi_domain_free,
+};
+
+#define PLDA_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_NO_AFFINITY)
+#define PLDA_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
+ MSI_FLAG_PCI_MSIX)
+
+static const struct msi_parent_ops plda_msi_parent_ops = {
+ .required_flags = PLDA_MSI_FLAGS_REQUIRED,
+ .supported_flags = PLDA_MSI_FLAGS_SUPPORTED,
+ .chip_flags = MSI_CHIP_FLAG_SET_ACK,
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .prefix = "PLDA-",
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
+};
+
+static int plda_allocate_msi_domains(struct plda_pcie_rp *port)
+{
+ struct device *dev = port->dev;
+ struct plda_msi *msi = &port->msi;
+ struct irq_domain_info info = {
+ .fwnode = dev_fwnode(dev),
+ .ops = &msi_domain_ops,
+ .host_data = port,
+ .size = msi->num_vectors,
+ };
+
+ mutex_init(&port->msi.lock);
+
+ msi->dev_domain = msi_create_parent_irq_domain(&info, &plda_msi_parent_ops);
+ if (!msi->dev_domain) {
+ dev_err(dev, "failed to create IRQ domain\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void plda_handle_intx(struct irq_desc *desc)
+{
+ struct plda_pcie_rp *port = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct device *dev = port->dev;
+ void __iomem *bridge_base_addr = port->bridge_addr;
+ unsigned long status;
+ u32 bit;
+ int ret;
+
+ chained_irq_enter(chip, desc);
+
+ status = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL);
+ if (status & PM_MSI_INT_INTX_MASK) {
+ status &= PM_MSI_INT_INTX_MASK;
+ status >>= PM_MSI_INT_INTX_SHIFT;
+ for_each_set_bit(bit, &status, PCI_NUM_INTX) {
+ ret = generic_handle_domain_irq(port->intx_domain, bit);
+ if (ret)
+ dev_err_ratelimited(dev, "bad INTx IRQ %d\n",
+ bit);
+ }
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static void plda_ack_intx_irq(struct irq_data *data)
+{
+ struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
+ void __iomem *bridge_base_addr = port->bridge_addr;
+ u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);
+
+ writel_relaxed(mask, bridge_base_addr + ISTATUS_LOCAL);
+}
+
+static void plda_mask_intx_irq(struct irq_data *data)
+{
+ struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
+ void __iomem *bridge_base_addr = port->bridge_addr;
+ unsigned long flags;
+ u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);
+ u32 val;
+
+ raw_spin_lock_irqsave(&port->lock, flags);
+ val = readl_relaxed(bridge_base_addr + IMASK_LOCAL);
+ val &= ~mask;
+ writel_relaxed(val, bridge_base_addr + IMASK_LOCAL);
+ raw_spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void plda_unmask_intx_irq(struct irq_data *data)
+{
+ struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
+ void __iomem *bridge_base_addr = port->bridge_addr;
+ unsigned long flags;
+ u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);
+ u32 val;
+
+ raw_spin_lock_irqsave(&port->lock, flags);
+ val = readl_relaxed(bridge_base_addr + IMASK_LOCAL);
+ val |= mask;
+ writel_relaxed(val, bridge_base_addr + IMASK_LOCAL);
+ raw_spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static struct irq_chip plda_intx_irq_chip = {
+ .name = "PLDA PCIe INTx",
+ .irq_ack = plda_ack_intx_irq,
+ .irq_mask = plda_mask_intx_irq,
+ .irq_unmask = plda_unmask_intx_irq,
+};
+
+static int plda_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &plda_intx_irq_chip, handle_level_irq);
+ irq_set_chip_data(irq, domain->host_data);
+
+ return 0;
+}
+
+static const struct irq_domain_ops intx_domain_ops = {
+ .map = plda_pcie_intx_map,
+};
+
+static u32 plda_get_events(struct plda_pcie_rp *port)
+{
+ u32 events, val, origin;
+
+ origin = readl_relaxed(port->bridge_addr + ISTATUS_LOCAL);
+
+ /* MSI event and sys events */
+ val = (origin & SYS_AND_MSI_MASK) >> PM_MSI_INT_MSI_SHIFT;
+ events = val << (PM_MSI_INT_MSI_SHIFT - PCI_NUM_INTX + 1);
+
+ /* INTx events */
+ if (origin & PM_MSI_INT_INTX_MASK)
+ events |= BIT(PM_MSI_INT_INTX_SHIFT);
+
+ /* the remaining events map 1:1 onto the register bits */
+ events |= origin & GENMASK(P_ATR_EVT_DOORBELL_SHIFT, 0);
+
+ return events;
+}
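The resulting bit remap, assuming EVENT_PM_MSI_INT_INTX == 24 (the event numbering comes from defines later in pcie-plda.h, outside the portion shown here):

/*
 *   ISTATUS_LOCAL bits  0-23 -> events  0-23  (copied through unchanged)
 *   ISTATUS_LOCAL bits 24-27 -> event  24     (all four INTx collapse)
 *   ISTATUS_LOCAL bit  28    -> event  25     (MSI)
 *   ISTATUS_LOCAL bit  29    -> event  26     (AER)
 *   ISTATUS_LOCAL bit  30    -> event  27     (PM/LTR/hotplug events)
 *   ISTATUS_LOCAL bit  31    -> event  28     (SYS_ERR)
 *
 * plda_hwirq_to_mask() below implements the exact inverse of this
 * packing.
 */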
+
+static irqreturn_t plda_event_handler(int irq, void *dev_id)
+{
+ return IRQ_HANDLED;
+}
+
+static void plda_handle_event(struct irq_desc *desc)
+{
+ struct plda_pcie_rp *port = irq_desc_get_handler_data(desc);
+ unsigned long events;
+ u32 bit;
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+
+ chained_irq_enter(chip, desc);
+
+ events = port->event_ops->get_events(port);
+
+ events &= port->events_bitmap;
+ for_each_set_bit(bit, &events, port->num_events)
+ generic_handle_domain_irq(port->event_domain, bit);
+
+ chained_irq_exit(chip, desc);
+}
+
+static u32 plda_hwirq_to_mask(int hwirq)
+{
+ u32 mask;
+
+ /* hwirqs 0 - 23 map 1:1 onto the register bits */
+ if (hwirq < EVENT_PM_MSI_INT_INTX)
+ mask = BIT(hwirq);
+ else if (hwirq == EVENT_PM_MSI_INT_INTX)
+ mask = PM_MSI_INT_INTX_MASK;
+ else
+ mask = BIT(hwirq + PCI_NUM_INTX - 1);
+
+ return mask;
+}
+
+static void plda_ack_event_irq(struct irq_data *data)
+{
+ struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
+
+ writel_relaxed(plda_hwirq_to_mask(data->hwirq),
+ port->bridge_addr + ISTATUS_LOCAL);
+}
+
+static void plda_mask_event_irq(struct irq_data *data)
+{
+ struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
+ u32 mask, val;
+
+ mask = plda_hwirq_to_mask(data->hwirq);
+
+ raw_spin_lock(&port->lock);
+ val = readl_relaxed(port->bridge_addr + IMASK_LOCAL);
+ val &= ~mask;
+ writel_relaxed(val, port->bridge_addr + IMASK_LOCAL);
+ raw_spin_unlock(&port->lock);
+}
+
+static void plda_unmask_event_irq(struct irq_data *data)
+{
+ struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
+ u32 mask, val;
+
+ mask = plda_hwirq_to_mask(data->hwirq);
+
+ raw_spin_lock(&port->lock);
+ val = readl_relaxed(port->bridge_addr + IMASK_LOCAL);
+ val |= mask;
+ writel_relaxed(val, port->bridge_addr + IMASK_LOCAL);
+ raw_spin_unlock(&port->lock);
+}
+
+static struct irq_chip plda_event_irq_chip = {
+ .name = "PLDA PCIe EVENT",
+ .irq_ack = plda_ack_event_irq,
+ .irq_mask = plda_mask_event_irq,
+ .irq_unmask = plda_unmask_event_irq,
+};
+
+static const struct plda_event_ops plda_event_ops = {
+ .get_events = plda_get_events,
+};
+
+static int plda_pcie_event_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ struct plda_pcie_rp *port = (void *)domain->host_data;
+
+ irq_set_chip_and_handler(irq, port->event_irq_chip, handle_level_irq);
+ irq_set_chip_data(irq, domain->host_data);
+
+ return 0;
+}
+
+static const struct irq_domain_ops plda_event_domain_ops = {
+ .map = plda_pcie_event_map,
+};
+
+static int plda_pcie_init_irq_domains(struct plda_pcie_rp *port)
+{
+ struct device *dev = port->dev;
+ struct device_node *node = dev->of_node;
+ struct device_node *pcie_intc_node;
+
+ /* Setup INTx */
+ pcie_intc_node = of_get_next_child(node, NULL);
+ if (!pcie_intc_node) {
+ dev_err(dev, "failed to find PCIe Intc node\n");
+ return -EINVAL;
+ }
+
+ port->event_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node),
+ port->num_events, &plda_event_domain_ops,
+ port);
+ if (!port->event_domain) {
+ dev_err(dev, "failed to get event domain\n");
+ of_node_put(pcie_intc_node);
+ return -ENOMEM;
+ }
+
+ irq_domain_update_bus_token(port->event_domain, DOMAIN_BUS_NEXUS);
+
+ port->intx_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), PCI_NUM_INTX,
+ &intx_domain_ops, port);
+ if (!port->intx_domain) {
+ dev_err(dev, "failed to get an INTx IRQ domain\n");
+ of_node_put(pcie_intc_node);
+ return -ENOMEM;
+ }
+
+ irq_domain_update_bus_token(port->intx_domain, DOMAIN_BUS_WIRED);
+
+ of_node_put(pcie_intc_node);
+ raw_spin_lock_init(&port->lock);
+
+ return plda_allocate_msi_domains(port);
+}
+
+int plda_init_interrupts(struct platform_device *pdev,
+ struct plda_pcie_rp *port,
+ const struct plda_event *event)
+{
+ struct device *dev = &pdev->dev;
+ int event_irq, ret;
+ u32 i;
+
+ if (!port->event_ops)
+ port->event_ops = &plda_event_ops;
+
+ if (!port->event_irq_chip)
+ port->event_irq_chip = &plda_event_irq_chip;
+
+ ret = plda_pcie_init_irq_domains(port);
+ if (ret) {
+ dev_err(dev, "failed creating IRQ domains\n");
+ return ret;
+ }
+
+ port->irq = platform_get_irq(pdev, 0);
+ if (port->irq < 0)
+ return -ENODEV;
+
+ for_each_set_bit(i, &port->events_bitmap, port->num_events) {
+ event_irq = irq_create_mapping(port->event_domain, i);
+ if (!event_irq) {
+ dev_err(dev, "failed to map hwirq %d\n", i);
+ return -ENXIO;
+ }
+
+ if (event->request_event_irq)
+ ret = event->request_event_irq(port, event_irq, i);
+ else
+ ret = devm_request_irq(dev, event_irq,
+ plda_event_handler,
+ 0, NULL, port);
+
+ if (ret) {
+ dev_err(dev, "failed to request IRQ %d\n", event_irq);
+ return ret;
+ }
+ }
+
+ port->intx_irq = irq_create_mapping(port->event_domain,
+ event->intx_event);
+ if (!port->intx_irq) {
+ dev_err(dev, "failed to map INTx interrupt\n");
+ return -ENXIO;
+ }
+
+ /* Plug the INTx chained handler */
+ irq_set_chained_handler_and_data(port->intx_irq, plda_handle_intx, port);
+
+ port->msi_irq = irq_create_mapping(port->event_domain,
+ event->msi_event);
+ if (!port->msi_irq)
+ return -ENXIO;
+
+ /* Plug the MSI chained handler */
+ irq_set_chained_handler_and_data(port->msi_irq, plda_handle_msi, port);
+
+ /* Plug the main event chained handler */
+ irq_set_chained_handler_and_data(port->irq, plda_handle_event, port);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(plda_init_interrupts);
+
+void plda_pcie_setup_window(void __iomem *bridge_base_addr, u32 index,
+ phys_addr_t axi_addr, phys_addr_t pci_addr,
+ size_t size)
+{
+ u32 atr_sz = ilog2(size) - 1;
+ u32 val;
+
+ if (index == 0)
+ val = PCIE_CONFIG_INTERFACE;
+ else
+ val = PCIE_TX_RX_INTERFACE;
+
+ writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
+ ATR0_AXI4_SLV0_TRSL_PARAM);
+
+ val = ALIGN_DOWN(lower_32_bits(axi_addr), SZ_4K);
+ val |= FIELD_PREP(ATR_SIZE_MASK, atr_sz);
+ val |= ATR_IMPL_ENABLE;
+ writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
+ ATR0_AXI4_SLV0_SRCADDR_PARAM);
+
+ val = upper_32_bits(axi_addr);
+ writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
+ ATR0_AXI4_SLV0_SRC_ADDR);
+
+ val = lower_32_bits(pci_addr);
+ writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
+ ATR0_AXI4_SLV0_TRSL_ADDR_LSB);
+
+ val = upper_32_bits(pci_addr);
+ writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
+ ATR0_AXI4_SLV0_TRSL_ADDR_UDW);
+}
+EXPORT_SYMBOL_GPL(plda_pcie_setup_window);
+
+void plda_pcie_setup_inbound_address_translation(struct plda_pcie_rp *port)
+{
+ void __iomem *bridge_base_addr = port->bridge_addr;
+ u32 val;
+
+ val = readl(bridge_base_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM);
+ val |= (ATR0_PCIE_ATR_SIZE << ATR0_PCIE_ATR_SIZE_SHIFT);
+ writel(val, bridge_base_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM);
+ writel(0, bridge_base_addr + ATR0_PCIE_WIN0_SRC_ADDR);
+}
+EXPORT_SYMBOL_GPL(plda_pcie_setup_inbound_address_translation);
+
+int plda_pcie_setup_iomems(struct pci_host_bridge *bridge,
+ struct plda_pcie_rp *port)
+{
+ void __iomem *bridge_base_addr = port->bridge_addr;
+ struct resource_entry *entry;
+ u64 pci_addr;
+ u32 index = 1;
+
+ resource_list_for_each_entry(entry, &bridge->windows) {
+ if (resource_type(entry->res) == IORESOURCE_MEM) {
+ pci_addr = entry->res->start - entry->offset;
+ plda_pcie_setup_window(bridge_base_addr, index,
+ entry->res->start, pci_addr,
+ resource_size(entry->res));
+ index++;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(plda_pcie_setup_iomems);
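The offset arithmetic in the loop above, traced with a hypothetical bridge window:

/*
 * res = [0x60000000, 0x6fffffff], entry->offset = 0x20000000:
 *
 *   pci_addr = 0x60000000 - 0x20000000 = 0x40000000
 *
 * so window 1 translates AXI 0x60000000 -> PCIe 0x40000000 across a
 * 256 MiB span (atr_sz = ilog2(SZ_256M) - 1 = 27 in
 * plda_pcie_setup_window()).
 */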
+
+static void plda_pcie_irq_domain_deinit(struct plda_pcie_rp *pcie)
+{
+ irq_set_chained_handler_and_data(pcie->irq, NULL, NULL);
+ irq_set_chained_handler_and_data(pcie->msi_irq, NULL, NULL);
+ irq_set_chained_handler_and_data(pcie->intx_irq, NULL, NULL);
+
+ irq_domain_remove(pcie->msi.dev_domain);
+
+ irq_domain_remove(pcie->intx_domain);
+ irq_domain_remove(pcie->event_domain);
+}
+
+int plda_pcie_host_init(struct plda_pcie_rp *port, struct pci_ops *ops,
+ const struct plda_event *plda_event)
+{
+ struct device *dev = port->dev;
+ struct pci_host_bridge *bridge;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct resource *cfg_res;
+ int ret;
+
+ port->bridge_addr = devm_platform_ioremap_resource_byname(pdev, "apb");
+ if (IS_ERR(port->bridge_addr))
+ return dev_err_probe(dev, PTR_ERR(port->bridge_addr),
+ "failed to map reg memory\n");
+
+ cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
+ if (!cfg_res)
+ return dev_err_probe(dev, -ENODEV,
+ "failed to get config memory\n");
+
+ port->config_base = devm_ioremap_resource(dev, cfg_res);
+ if (IS_ERR(port->config_base))
+ return dev_err_probe(dev, PTR_ERR(port->config_base),
+ "failed to map config memory\n");
+
+ bridge = devm_pci_alloc_host_bridge(dev, 0);
+ if (!bridge)
+ return -ENOMEM;
+
+ if (port->host_ops && port->host_ops->host_init) {
+ ret = port->host_ops->host_init(port);
+ if (ret)
+ return ret;
+ }
+
+ port->bridge = bridge;
+ plda_pcie_setup_window(port->bridge_addr, 0, cfg_res->start, 0,
+ resource_size(cfg_res));
+ plda_pcie_setup_iomems(bridge, port);
+ plda_set_default_msi(&port->msi);
+ ret = plda_init_interrupts(pdev, port, plda_event);
+ if (ret)
+ goto err_host;
+
+ /* Set default bus ops */
+ bridge->ops = ops;
+ bridge->sysdata = port;
+
+ ret = pci_host_probe(bridge);
+ if (ret < 0) {
+ dev_err_probe(dev, ret, "failed to probe pci host\n");
+ goto err_probe;
+ }
+
+ return ret;
+
+err_probe:
+ plda_pcie_irq_domain_deinit(port);
+err_host:
+ if (port->host_ops && port->host_ops->host_deinit)
+ port->host_ops->host_deinit(port);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(plda_pcie_host_init);
+
+void plda_pcie_host_deinit(struct plda_pcie_rp *port)
+{
+ pci_stop_root_bus(port->bridge->bus);
+ pci_remove_root_bus(port->bridge->bus);
+
+ plda_pcie_irq_domain_deinit(port);
+
+ if (port->host_ops && port->host_ops->host_deinit)
+ port->host_ops->host_deinit(port);
+}
+EXPORT_SYMBOL_GPL(plda_pcie_host_deinit);
diff --git a/drivers/pci/controller/plda/pcie-plda.h b/drivers/pci/controller/plda/pcie-plda.h
new file mode 100644
index 000000000000..6b8665df7bf0
--- /dev/null
+++ b/drivers/pci/controller/plda/pcie-plda.h
@@ -0,0 +1,274 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * PLDA PCIe host controller driver
+ */
+
+#ifndef _PCIE_PLDA_H
+#define _PCIE_PLDA_H
+
+/* Number of MSI IRQs */
+#define PLDA_MAX_NUM_MSI_IRQS 32
+
+/* PCIe Bridge Phy Regs */
+#define GEN_SETTINGS 0x80
+#define RP_ENABLE 1
+#define PCIE_PCI_IDS_DW1 0x9c
+#define IDS_CLASS_CODE_SHIFT 16
+#define REVISION_ID_MASK GENMASK(7, 0)
+#define CLASS_CODE_ID_MASK GENMASK(31, 8)
+#define PCIE_PCI_IRQ_DW0 0xa8
+#define MSIX_CAP_MASK BIT(31)
+#define NUM_MSI_MSGS_MASK GENMASK(6, 4)
+#define NUM_MSI_MSGS_SHIFT 4
+#define PCI_MISC 0xb4
+#define PHY_FUNCTION_DIS BIT(15)
+#define PCIE_WINROM 0xfc
+#define PREF_MEM_WIN_64_SUPPORT BIT(3)
+
+#define IMASK_LOCAL 0x180
+#define DMA_END_ENGINE_0_MASK 0x00000000u
+#define DMA_END_ENGINE_0_SHIFT 0
+#define DMA_END_ENGINE_1_MASK 0x00000000u
+#define DMA_END_ENGINE_1_SHIFT 1
+#define DMA_ERROR_ENGINE_0_MASK 0x00000100u
+#define DMA_ERROR_ENGINE_0_SHIFT 8
+#define DMA_ERROR_ENGINE_1_MASK 0x00000200u
+#define DMA_ERROR_ENGINE_1_SHIFT 9
+#define A_ATR_EVT_POST_ERR_MASK 0x00010000u
+#define A_ATR_EVT_POST_ERR_SHIFT 16
+#define A_ATR_EVT_FETCH_ERR_MASK 0x00020000u
+#define A_ATR_EVT_FETCH_ERR_SHIFT 17
+#define A_ATR_EVT_DISCARD_ERR_MASK 0x00040000u
+#define A_ATR_EVT_DISCARD_ERR_SHIFT 18
+#define A_ATR_EVT_DOORBELL_MASK 0x00000000u
+#define A_ATR_EVT_DOORBELL_SHIFT 19
+#define P_ATR_EVT_POST_ERR_MASK 0x00100000u
+#define P_ATR_EVT_POST_ERR_SHIFT 20
+#define P_ATR_EVT_FETCH_ERR_MASK 0x00200000u
+#define P_ATR_EVT_FETCH_ERR_SHIFT 21
+#define P_ATR_EVT_DISCARD_ERR_MASK 0x00400000u
+#define P_ATR_EVT_DISCARD_ERR_SHIFT 22
+#define P_ATR_EVT_DOORBELL_MASK 0x00000000u
+#define P_ATR_EVT_DOORBELL_SHIFT 23
+#define PM_MSI_INT_INTA_MASK 0x01000000u
+#define PM_MSI_INT_INTA_SHIFT 24
+#define PM_MSI_INT_INTB_MASK 0x02000000u
+#define PM_MSI_INT_INTB_SHIFT 25
+#define PM_MSI_INT_INTC_MASK 0x04000000u
+#define PM_MSI_INT_INTC_SHIFT 26
+#define PM_MSI_INT_INTD_MASK 0x08000000u
+#define PM_MSI_INT_INTD_SHIFT 27
+#define PM_MSI_INT_INTX_MASK 0x0f000000u
+#define PM_MSI_INT_INTX_SHIFT 24
+#define PM_MSI_INT_MSI_MASK 0x10000000u
+#define PM_MSI_INT_MSI_SHIFT 28
+#define PM_MSI_INT_AER_EVT_MASK 0x20000000u
+#define PM_MSI_INT_AER_EVT_SHIFT 29
+#define PM_MSI_INT_EVENTS_MASK 0x40000000u
+#define PM_MSI_INT_EVENTS_SHIFT 30
+#define PM_MSI_INT_SYS_ERR_MASK 0x80000000u
+#define PM_MSI_INT_SYS_ERR_SHIFT 31
+#define SYS_AND_MSI_MASK GENMASK(31, 28)
+#define NUM_LOCAL_EVENTS 15
+#define ISTATUS_LOCAL 0x184
+#define IMASK_HOST 0x188
+#define ISTATUS_HOST 0x18c
+#define IMSI_ADDR 0x190
+#define ISTATUS_MSI 0x194
+#define PMSG_SUPPORT_RX 0x3f0
+#define PMSG_LTR_SUPPORT BIT(2)
+
+/* PCIe Master table init defines */
+#define ATR0_PCIE_WIN0_SRCADDR_PARAM 0x600u
+#define ATR0_PCIE_ATR_SIZE 0x25
+#define ATR0_PCIE_ATR_SIZE_SHIFT 1
+#define ATR0_PCIE_WIN0_SRC_ADDR 0x604u
+#define ATR0_PCIE_WIN0_TRSL_ADDR_LSB 0x608u
+#define ATR0_PCIE_WIN0_TRSL_ADDR_UDW 0x60cu
+#define ATR0_PCIE_WIN0_TRSL_PARAM 0x610u
+
+/* PCIe AXI slave table init defines */
+#define ATR0_AXI4_SLV0_SRCADDR_PARAM 0x800u
+#define ATR_SIZE_MASK GENMASK(6, 1)
+#define ATR_IMPL_ENABLE BIT(0)
+#define ATR0_AXI4_SLV0_SRC_ADDR 0x804u
+#define ATR0_AXI4_SLV0_TRSL_ADDR_LSB 0x808u
+#define ATR0_AXI4_SLV0_TRSL_ADDR_UDW 0x80cu
+#define ATR0_AXI4_SLV0_TRSL_PARAM 0x810u
+#define PCIE_TX_RX_INTERFACE 0x00000000u
+#define PCIE_CONFIG_INTERFACE 0x00000001u
+#define TRSL_ID_AXI4_MASTER_0 0x00000004u
+
+#define CONFIG_SPACE_ADDR_OFFSET 0x1000u
+
+#define ATR_ENTRY_SIZE 32
+
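Given the masks above, a plausible sketch of how plda_pcie_setup_window() programs one outbound AXI slave table entry. The helper name example_setup_window is hypothetical, FIELD_PREP/ilog2 come from <linux/bitfield.h> and <linux/log2.h>, and the authoritative body lives in the host library above, so treat this as an illustration of the register layout rather than the exact implementation:

static void example_setup_window(void __iomem *bridge_base_addr, u32 index,
				 phys_addr_t axi_addr, phys_addr_t pci_addr,
				 size_t size)
{
	void __iomem *table = bridge_base_addr + index * ATR_ENTRY_SIZE;
	u32 val;

	/* Source base, window size (log2(size) - 1 in bits [6:1]), enable */
	val = lower_32_bits(axi_addr) & ~(u32)(size - 1);
	val |= FIELD_PREP(ATR_SIZE_MASK, ilog2(size) - 1);
	val |= ATR_IMPL_ENABLE;
	writel(val, table + ATR0_AXI4_SLV0_SRCADDR_PARAM);
	writel(upper_32_bits(axi_addr), table + ATR0_AXI4_SLV0_SRC_ADDR);

	/* Translated PCIe address and target interface */
	writel(lower_32_bits(pci_addr), table + ATR0_AXI4_SLV0_TRSL_ADDR_LSB);
	writel(upper_32_bits(pci_addr), table + ATR0_AXI4_SLV0_TRSL_ADDR_UDW);
	writel(index == 0 ? PCIE_CONFIG_INTERFACE : PCIE_TX_RX_INTERFACE,
	       table + ATR0_AXI4_SLV0_TRSL_PARAM);
}

Entry 0 carries configuration traffic (plda_pcie_host_init() maps the "cfg" resource through it), while entries 1 and up carry memory traffic, as seen in plda_pcie_setup_iomems().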
+enum plda_int_event {
+ PLDA_AXI_POST_ERR,
+ PLDA_AXI_FETCH_ERR,
+ PLDA_AXI_DISCARD_ERR,
+ PLDA_AXI_DOORBELL,
+ PLDA_PCIE_POST_ERR,
+ PLDA_PCIE_FETCH_ERR,
+ PLDA_PCIE_DISCARD_ERR,
+ PLDA_PCIE_DOORBELL,
+ PLDA_INTX,
+ PLDA_MSI,
+ PLDA_AER_EVENT,
+ PLDA_MISC_EVENTS,
+ PLDA_SYS_ERR,
+ PLDA_INT_EVENT_NUM
+};
+
+#define PLDA_NUM_DMA_EVENTS 16
+
+#define EVENT_PM_MSI_INT_INTX (PLDA_NUM_DMA_EVENTS + PLDA_INTX)
+#define EVENT_PM_MSI_INT_MSI (PLDA_NUM_DMA_EVENTS + PLDA_MSI)
+#define PLDA_MAX_EVENT_NUM (PLDA_NUM_DMA_EVENTS + PLDA_INT_EVENT_NUM)
+
+/*
+ * PLDA interrupt register
+ *
+ * 31 27 23 15 7 0
+ * +--+--+--+-+------+-+-+-+-+-+-+-+-+-----------+-----------+
+ * |12|11|10|9| intx |7|6|5|4|3|2|1|0| DMA error | DMA end |
+ * +--+--+--+-+------+-+-+-+-+-+-+-+-+-----------+-----------+
+ * event bit
+ * 0-7 (0-7) DMA interrupt end: reserved for vendor implementation
+ * 8-15 (8-15) DMA error: reserved for vendor implementation
+ * 16 (16) AXI post error (PLDA_AXI_POST_ERR)
+ * 17 (17) AXI fetch error (PLDA_AXI_FETCH_ERR)
+ * 18 (18) AXI discard error (PLDA_AXI_DISCARD_ERR)
+ * 19 (19) AXI doorbell (PLDA_AXI_DOORBELL)
+ * 20 (20) PCIe post error (PLDA_PCIE_POST_ERR)
+ * 21 (21) PCIe fetch error (PLDA_PCIE_FETCH_ERR)
+ * 22 (22) PCIe discard error (PLDA_PCIE_DISCARD_ERR)
+ * 23 (23) PCIe doorbell (PLDA_PCIE_DOORBELL)
+ * 24 (27-24) INTx interrupts (PLDA_INTX)
+ * 25 (28) MSI interrupt (PLDA_MSI)
+ * 26 (29) AER event (PLDA_AER_EVENT)
+ * 27 (30) PM/LTR/Hotplug (PLDA_MISC_EVENTS)
+ * 28 (31) System error (PLDA_SYS_ERR)
+ */
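As a reading aid, a hypothetical helper making the event-to-register-bit mapping above explicit; the drivers use the PM_MSI_INT_* masks directly, so plda_event_to_mask() is illustrative only:

static u32 plda_event_to_mask(int event)
{
	if (event < EVENT_PM_MSI_INT_INTX)
		return BIT(event);            /* DMA and ATR events: bit == event */
	if (event == EVENT_PM_MSI_INT_INTX)
		return PM_MSI_INT_INTX_MASK;  /* INTx spans bits 27-24 */
	return BIT(event + 3);                /* events 25-28 sit at bits 28-31 */
}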
+
+struct plda_pcie_rp;
+
+struct plda_event_ops {
+ u32 (*get_events)(struct plda_pcie_rp *pcie);
+};
+
+struct plda_pcie_host_ops {
+ int (*host_init)(struct plda_pcie_rp *pcie);
+ void (*host_deinit)(struct plda_pcie_rp *pcie);
+};
+
+struct plda_msi {
+ struct mutex lock; /* Protect used bitmap */
+ struct irq_domain *dev_domain;
+ u32 num_vectors;
+ u64 vector_phy;
+ DECLARE_BITMAP(used, PLDA_MAX_NUM_MSI_IRQS);
+};
+
+struct plda_pcie_rp {
+ struct device *dev;
+ struct pci_host_bridge *bridge;
+ struct irq_domain *intx_domain;
+ struct irq_domain *event_domain;
+ raw_spinlock_t lock;
+ struct plda_msi msi;
+ const struct plda_event_ops *event_ops;
+ const struct irq_chip *event_irq_chip;
+ const struct plda_pcie_host_ops *host_ops;
+ void __iomem *bridge_addr;
+ void __iomem *config_base;
+ unsigned long events_bitmap;
+ int irq;
+ int msi_irq;
+ int intx_irq;
+ int num_events;
+};
+
+struct plda_event {
+ int (*request_event_irq)(struct plda_pcie_rp *pcie,
+ int event_irq, int event);
+ int intx_event;
+ int msi_event;
+};
+
+void __iomem *plda_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
+ int where);
+int plda_init_interrupts(struct platform_device *pdev,
+ struct plda_pcie_rp *port,
+ const struct plda_event *event);
+void plda_pcie_setup_window(void __iomem *bridge_base_addr, u32 index,
+ phys_addr_t axi_addr, phys_addr_t pci_addr,
+ size_t size);
+void plda_pcie_setup_inbound_address_translation(struct plda_pcie_rp *port);
+int plda_pcie_setup_iomems(struct pci_host_bridge *bridge,
+ struct plda_pcie_rp *port);
+int plda_pcie_host_init(struct plda_pcie_rp *port, struct pci_ops *ops,
+ const struct plda_event *plda_event);
+void plda_pcie_host_deinit(struct plda_pcie_rp *pcie);
+
+static inline void plda_set_default_msi(struct plda_msi *msi)
+{
+ msi->vector_phy = IMSI_ADDR;
+ msi->num_vectors = PLDA_MAX_NUM_MSI_IRQS;
+}
+
+static inline void plda_pcie_enable_root_port(struct plda_pcie_rp *plda)
+{
+ u32 value;
+
+ value = readl_relaxed(plda->bridge_addr + GEN_SETTINGS);
+ value |= RP_ENABLE;
+ writel_relaxed(value, plda->bridge_addr + GEN_SETTINGS);
+}
+
+static inline void plda_pcie_set_standard_class(struct plda_pcie_rp *plda)
+{
+ u32 value;
+
+ /* set class code and reserve revision id */
+ value = readl_relaxed(plda->bridge_addr + PCIE_PCI_IDS_DW1);
+ value &= REVISION_ID_MASK;
+ value |= (PCI_CLASS_BRIDGE_PCI << IDS_CLASS_CODE_SHIFT);
+ writel_relaxed(value, plda->bridge_addr + PCIE_PCI_IDS_DW1);
+}
+
+static inline void plda_pcie_set_pref_win_64bit(struct plda_pcie_rp *plda)
+{
+ u32 value;
+
+ value = readl_relaxed(plda->bridge_addr + PCIE_WINROM);
+ value |= PREF_MEM_WIN_64_SUPPORT;
+ writel_relaxed(value, plda->bridge_addr + PCIE_WINROM);
+}
+
+static inline void plda_pcie_disable_ltr(struct plda_pcie_rp *plda)
+{
+ u32 value;
+
+ value = readl_relaxed(plda->bridge_addr + PMSG_SUPPORT_RX);
+ value &= ~PMSG_LTR_SUPPORT;
+ writel_relaxed(value, plda->bridge_addr + PMSG_SUPPORT_RX);
+}
+
+static inline void plda_pcie_disable_func(struct plda_pcie_rp *plda)
+{
+ u32 value;
+
+ value = readl_relaxed(plda->bridge_addr + PCI_MISC);
+ value |= PHY_FUNCTION_DIS;
+ writel_relaxed(value, plda->bridge_addr + PCI_MISC);
+}
+
+static inline void plda_pcie_write_rc_bar(struct plda_pcie_rp *plda, u64 val)
+{
+ void __iomem *addr = plda->bridge_addr + CONFIG_SPACE_ADDR_OFFSET;
+
+ writel_relaxed(lower_32_bits(val), addr + PCI_BASE_ADDRESS_0);
+ writel_relaxed(upper_32_bits(val), addr + PCI_BASE_ADDRESS_1);
+}
+#endif /* _PCIE_PLDA_H */
diff --git a/drivers/pci/controller/plda/pcie-starfive.c b/drivers/pci/controller/plda/pcie-starfive.c
new file mode 100644
index 000000000000..3caf53c6c082
--- /dev/null
+++ b/drivers/pci/controller/plda/pcie-starfive.c
@@ -0,0 +1,492 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * PCIe host controller driver for StarFive JH7110 SoC.
+ *
+ * Copyright (C) 2023 StarFive Technology Co., Ltd.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include "../../pci.h"
+
+#include "pcie-plda.h"
+
+#define PCIE_FUNC_NUM 4
+
+/* system control */
+#define STG_SYSCON_PCIE0_BASE 0x48
+#define STG_SYSCON_PCIE1_BASE 0x1f8
+
+#define STG_SYSCON_AR_OFFSET 0x78
+#define STG_SYSCON_AXI4_SLVL_AR_MASK GENMASK(22, 8)
+#define STG_SYSCON_AXI4_SLVL_PHY_AR(x) FIELD_PREP(GENMASK(20, 17), x)
+#define STG_SYSCON_AW_OFFSET 0x7c
+#define STG_SYSCON_AXI4_SLVL_AW_MASK GENMASK(14, 0)
+#define STG_SYSCON_AXI4_SLVL_PHY_AW(x) FIELD_PREP(GENMASK(12, 9), x)
+#define STG_SYSCON_CLKREQ BIT(22)
+#define STG_SYSCON_CKREF_SRC_MASK GENMASK(19, 18)
+#define STG_SYSCON_RP_NEP_OFFSET 0xe8
+#define STG_SYSCON_K_RP_NEP BIT(8)
+#define STG_SYSCON_LNKSTA_OFFSET 0x170
+#define DATA_LINK_ACTIVE BIT(5)
+
+/* Parameters for the waiting for link up routine */
+#define LINK_WAIT_MAX_RETRIES 10
+#define LINK_WAIT_USLEEP_MIN 90000
+#define LINK_WAIT_USLEEP_MAX 100000
+
+struct starfive_jh7110_pcie {
+ struct plda_pcie_rp plda;
+ struct reset_control *resets;
+ struct clk_bulk_data *clks;
+ struct regmap *reg_syscon;
+ struct gpio_desc *power_gpio;
+ struct gpio_desc *reset_gpio;
+ struct phy *phy;
+
+ unsigned int stg_pcie_base;
+ int num_clks;
+};
+
+/*
+ * The JH7110 PCIe port BAR0/1 can be configured as 64-bit prefetchable
+ * memory space. PCIe read and write requests targeting BAR0/1 are routed
+ * to the so-called 'Bridge Configuration space' of the PLDA IP datasheet,
+ * which contains the bridge internal registers, such as the interrupt,
+ * DMA and ATU registers. The JH7110 accesses this space through its local
+ * bus and does not want the registers to be reachable by DMA from EP
+ * devices, so the BARs are left unimplemented and hidden here.
+ */
+static bool starfive_pcie_hide_rc_bar(struct pci_bus *bus, unsigned int devfn,
+ int offset)
+{
+ if (pci_is_root_bus(bus) && !devfn &&
+ (offset == PCI_BASE_ADDRESS_0 || offset == PCI_BASE_ADDRESS_1))
+ return true;
+
+ return false;
+}
+
+static int starfive_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 value)
+{
+ if (starfive_pcie_hide_rc_bar(bus, devfn, where))
+ return PCIBIOS_SUCCESSFUL;
+
+ return pci_generic_config_write(bus, devfn, where, size, value);
+}
+
+static int starfive_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *value)
+{
+ if (starfive_pcie_hide_rc_bar(bus, devfn, where)) {
+ *value = 0;
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+ return pci_generic_config_read(bus, devfn, where, size, value);
+}
+
+static int starfive_pcie_parse_dt(struct starfive_jh7110_pcie *pcie,
+ struct device *dev)
+{
+ int domain_nr;
+
+ pcie->num_clks = devm_clk_bulk_get_all(dev, &pcie->clks);
+ if (pcie->num_clks < 0)
+ return dev_err_probe(dev, pcie->num_clks,
+ "failed to get pcie clocks\n");
+
+ pcie->resets = devm_reset_control_array_get_exclusive(dev);
+ if (IS_ERR(pcie->resets))
+ return dev_err_probe(dev, PTR_ERR(pcie->resets),
+ "failed to get pcie resets");
+
+ pcie->reg_syscon =
+ syscon_regmap_lookup_by_phandle(dev->of_node,
+ "starfive,stg-syscon");
+
+ if (IS_ERR(pcie->reg_syscon))
+ return dev_err_probe(dev, PTR_ERR(pcie->reg_syscon),
+ "failed to parse starfive,stg-syscon\n");
+
+ pcie->phy = devm_phy_optional_get(dev, NULL);
+ if (IS_ERR(pcie->phy))
+ return dev_err_probe(dev, PTR_ERR(pcie->phy),
+ "failed to get pcie phy\n");
+
+ /*
+ * The PCIe domain numbers are statically assigned in the JH7110 DTS.
+ * Since the STG system controller defines different register bases for
+ * PCIe RP0 and RP1, the domain number identifies which controller is
+ * being initialized.
+ */
+ domain_nr = of_get_pci_domain_nr(dev->of_node);
+
+ if (domain_nr < 0 || domain_nr > 1)
+ return dev_err_probe(dev, -ENODEV,
+ "failed to get valid pcie domain\n");
+
+ if (domain_nr == 0)
+ pcie->stg_pcie_base = STG_SYSCON_PCIE0_BASE;
+ else
+ pcie->stg_pcie_base = STG_SYSCON_PCIE1_BASE;
+
+ pcie->reset_gpio = devm_gpiod_get_optional(dev, "perst",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(pcie->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(pcie->reset_gpio),
+ "failed to get perst-gpio\n");
+
+ pcie->power_gpio = devm_gpiod_get_optional(dev, "enable",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(pcie->power_gpio))
+ return dev_err_probe(dev, PTR_ERR(pcie->power_gpio),
+ "failed to get power-gpio\n");
+
+ return 0;
+}
+
+static struct pci_ops starfive_pcie_ops = {
+ .map_bus = plda_pcie_map_bus,
+ .read = starfive_pcie_config_read,
+ .write = starfive_pcie_config_write,
+};
+
+static int starfive_pcie_clk_rst_init(struct starfive_jh7110_pcie *pcie)
+{
+ struct device *dev = pcie->plda.dev;
+ int ret;
+
+ ret = clk_bulk_prepare_enable(pcie->num_clks, pcie->clks);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to enable clocks\n");
+
+ ret = reset_control_deassert(pcie->resets);
+ if (ret) {
+ clk_bulk_disable_unprepare(pcie->num_clks, pcie->clks);
+ dev_err_probe(dev, ret, "failed to deassert resets\n");
+ }
+
+ return ret;
+}
+
+static void starfive_pcie_clk_rst_deinit(struct starfive_jh7110_pcie *pcie)
+{
+ reset_control_assert(pcie->resets);
+ clk_bulk_disable_unprepare(pcie->num_clks, pcie->clks);
+}
+
+static bool starfive_pcie_link_up(struct plda_pcie_rp *plda)
+{
+ struct starfive_jh7110_pcie *pcie =
+ container_of(plda, struct starfive_jh7110_pcie, plda);
+ int ret;
+ u32 stg_reg_val;
+
+ ret = regmap_read(pcie->reg_syscon,
+ pcie->stg_pcie_base + STG_SYSCON_LNKSTA_OFFSET,
+ &stg_reg_val);
+ if (ret) {
+ dev_err(pcie->plda.dev, "failed to read link status\n");
+ return false;
+ }
+
+ return !!(stg_reg_val & DATA_LINK_ACTIVE);
+}
+
+static int starfive_pcie_host_wait_for_link(struct starfive_jh7110_pcie *pcie)
+{
+ int retries;
+
+ /* Poll for link-up for roughly one second (10 retries x 90-100 ms) */
+ for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
+ if (starfive_pcie_link_up(&pcie->plda)) {
+ dev_info(pcie->plda.dev, "port link up\n");
+ return 0;
+ }
+ usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int starfive_pcie_enable_phy(struct device *dev,
+ struct starfive_jh7110_pcie *pcie)
+{
+ int ret;
+
+ if (!pcie->phy)
+ return 0;
+
+ ret = phy_init(pcie->phy);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to initialize pcie phy\n");
+
+ ret = phy_set_mode(pcie->phy, PHY_MODE_PCIE);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to set pcie mode\n");
+ goto err_phy_on;
+ }
+
+ ret = phy_power_on(pcie->phy);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to power on pcie phy\n");
+ goto err_phy_on;
+ }
+
+ return 0;
+
+err_phy_on:
+ phy_exit(pcie->phy);
+ return ret;
+}
+
+static void starfive_pcie_disable_phy(struct starfive_jh7110_pcie *pcie)
+{
+ phy_power_off(pcie->phy);
+ phy_exit(pcie->phy);
+}
+
+static void starfive_pcie_host_deinit(struct plda_pcie_rp *plda)
+{
+ struct starfive_jh7110_pcie *pcie =
+ container_of(plda, struct starfive_jh7110_pcie, plda);
+
+ starfive_pcie_clk_rst_deinit(pcie);
+ if (pcie->power_gpio)
+ gpiod_set_value_cansleep(pcie->power_gpio, 0);
+ starfive_pcie_disable_phy(pcie);
+}
+
+static int starfive_pcie_host_init(struct plda_pcie_rp *plda)
+{
+ struct starfive_jh7110_pcie *pcie =
+ container_of(plda, struct starfive_jh7110_pcie, plda);
+ struct device *dev = plda->dev;
+ int ret;
+ int i;
+
+ ret = starfive_pcie_enable_phy(dev, pcie);
+ if (ret)
+ return ret;
+
+ regmap_update_bits(pcie->reg_syscon,
+ pcie->stg_pcie_base + STG_SYSCON_RP_NEP_OFFSET,
+ STG_SYSCON_K_RP_NEP, STG_SYSCON_K_RP_NEP);
+
+ regmap_update_bits(pcie->reg_syscon,
+ pcie->stg_pcie_base + STG_SYSCON_AW_OFFSET,
+ STG_SYSCON_CKREF_SRC_MASK,
+ FIELD_PREP(STG_SYSCON_CKREF_SRC_MASK, 2));
+
+ regmap_update_bits(pcie->reg_syscon,
+ pcie->stg_pcie_base + STG_SYSCON_AW_OFFSET,
+ STG_SYSCON_CLKREQ, STG_SYSCON_CLKREQ);
+
+ ret = starfive_pcie_clk_rst_init(pcie);
+ if (ret)
+ return ret;
+
+ if (pcie->power_gpio)
+ gpiod_set_value_cansleep(pcie->power_gpio, 1);
+
+ if (pcie->reset_gpio)
+ gpiod_set_value_cansleep(pcie->reset_gpio, 1);
+
+ /* Disable physical functions except #0 */
+ for (i = 1; i < PCIE_FUNC_NUM; i++) {
+ regmap_update_bits(pcie->reg_syscon,
+ pcie->stg_pcie_base + STG_SYSCON_AR_OFFSET,
+ STG_SYSCON_AXI4_SLVL_AR_MASK,
+ STG_SYSCON_AXI4_SLVL_PHY_AR(i));
+
+ regmap_update_bits(pcie->reg_syscon,
+ pcie->stg_pcie_base + STG_SYSCON_AW_OFFSET,
+ STG_SYSCON_AXI4_SLVL_AW_MASK,
+ STG_SYSCON_AXI4_SLVL_PHY_AW(i));
+
+ plda_pcie_disable_func(plda);
+ }
+
+ regmap_update_bits(pcie->reg_syscon,
+ pcie->stg_pcie_base + STG_SYSCON_AR_OFFSET,
+ STG_SYSCON_AXI4_SLVL_AR_MASK, 0);
+ regmap_update_bits(pcie->reg_syscon,
+ pcie->stg_pcie_base + STG_SYSCON_AW_OFFSET,
+ STG_SYSCON_AXI4_SLVL_AW_MASK, 0);
+
+ plda_pcie_enable_root_port(plda);
+ plda_pcie_write_rc_bar(plda, 0);
+
+ /* PCIe PCI Standard Configuration Identification Settings. */
+ plda_pcie_set_standard_class(plda);
+
+ /*
+ * LTR message reception is enabled by default via the "PCIe Message
+ * Reception" register, but the forwarding ID and address are left
+ * uninitialized. If LTR message forwarding is not disabled here (or
+ * given a valid forwarding address), the kernel gets stuck. As a
+ * workaround, disable LTR message forwarding before the feature is
+ * used.
+ */
+ plda_pcie_disable_ltr(plda);
+
+ /*
+ * Enable 64-bit addressing for the prefetchable memory window in the
+ * JH7110. The 64-bit prefetchable address translation configured in
+ * the ATU only takes effect once this register bit is set.
+ */
+ plda_pcie_set_pref_win_64bit(plda);
+
+ /*
+ * Ensure that PERST has been asserted for at least 100 ms; the sleep
+ * value is T_PVPERL from the PCIe CEM spec r2.0 (Table 2-4).
+ */
+ msleep(100);
+ if (pcie->reset_gpio)
+ gpiod_set_value_cansleep(pcie->reset_gpio, 0);
+
+ /*
+ * For a Downstream Port running at <= 5 GT/s, software must wait a
+ * minimum of 100 ms following exit from a conventional reset before
+ * sending a configuration request to the device.
+ */
+ msleep(PCIE_RESET_CONFIG_WAIT_MS);
+
+ if (starfive_pcie_host_wait_for_link(pcie))
+ dev_info(dev, "port link down\n");
+
+ return 0;
+}
+
+static const struct plda_pcie_host_ops sf_host_ops = {
+ .host_init = starfive_pcie_host_init,
+ .host_deinit = starfive_pcie_host_deinit,
+};
+
+static const struct plda_event stf_pcie_event = {
+ .intx_event = EVENT_PM_MSI_INT_INTX,
+ .msi_event = EVENT_PM_MSI_INT_MSI
+};
+
+static int starfive_pcie_probe(struct platform_device *pdev)
+{
+ struct starfive_jh7110_pcie *pcie;
+ struct device *dev = &pdev->dev;
+ struct plda_pcie_rp *plda;
+ int ret;
+
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ plda = &pcie->plda;
+ plda->dev = dev;
+
+ ret = starfive_pcie_parse_dt(pcie, dev);
+ if (ret)
+ return ret;
+
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
+
+ plda->host_ops = &sf_host_ops;
+ plda->num_events = PLDA_MAX_EVENT_NUM;
+ /* Mask the doorbell events; see the worked example after this function */
+ plda->events_bitmap = GENMASK(PLDA_INT_EVENT_NUM - 1, 0)
+ & ~BIT(PLDA_AXI_DOORBELL)
+ & ~BIT(PLDA_PCIE_DOORBELL);
+ plda->events_bitmap <<= PLDA_NUM_DMA_EVENTS;
+ ret = plda_pcie_host_init(&pcie->plda, &starfive_pcie_ops,
+ &stf_pcie_event);
+ if (ret) {
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, pcie);
+
+ return 0;
+}
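The doorbell masking in the probe above works out as follows (a worked example using the macro values from pcie-plda.h):

/*
 * GENMASK(PLDA_INT_EVENT_NUM - 1, 0)  = GENMASK(12, 0) = 0x1fff
 * & ~BIT(PLDA_AXI_DOORBELL)   (bit 3)  -> 0x1ff7
 * & ~BIT(PLDA_PCIE_DOORBELL)  (bit 7)  -> 0x1f77
 * << PLDA_NUM_DMA_EVENTS      (16)     -> 0x1f770000
 *
 * i.e. every interrupt event except the two doorbells, positioned above
 * the 16 vendor-defined DMA event bits.
 */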
+
+static void starfive_pcie_remove(struct platform_device *pdev)
+{
+ struct starfive_jh7110_pcie *pcie = platform_get_drvdata(pdev);
+
+ pm_runtime_put(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ plda_pcie_host_deinit(&pcie->plda);
+ platform_set_drvdata(pdev, NULL);
+}
+
+static int starfive_pcie_suspend_noirq(struct device *dev)
+{
+ struct starfive_jh7110_pcie *pcie = dev_get_drvdata(dev);
+
+ clk_bulk_disable_unprepare(pcie->num_clks, pcie->clks);
+ starfive_pcie_disable_phy(pcie);
+
+ return 0;
+}
+
+static int starfive_pcie_resume_noirq(struct device *dev)
+{
+ struct starfive_jh7110_pcie *pcie = dev_get_drvdata(dev);
+ int ret;
+
+ ret = starfive_pcie_enable_phy(dev, pcie);
+ if (ret)
+ return ret;
+
+ ret = clk_bulk_prepare_enable(pcie->num_clks, pcie->clks);
+ if (ret) {
+ dev_err(dev, "failed to enable clocks\n");
+ starfive_pcie_disable_phy(pcie);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops starfive_pcie_pm_ops = {
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(starfive_pcie_suspend_noirq,
+ starfive_pcie_resume_noirq)
+};
+
+static const struct of_device_id starfive_pcie_of_match[] = {
+ { .compatible = "starfive,jh7110-pcie", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, starfive_pcie_of_match);
+
+static struct platform_driver starfive_pcie_driver = {
+ .driver = {
+ .name = "pcie-starfive",
+ .of_match_table = of_match_ptr(starfive_pcie_of_match),
+ .pm = pm_sleep_ptr(&starfive_pcie_pm_ops),
+ },
+ .probe = starfive_pcie_probe,
+ .remove = starfive_pcie_remove,
+};
+module_platform_driver(starfive_pcie_driver);
+
+MODULE_DESCRIPTION("StarFive JH7110 PCIe host driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index 769eedeb8802..ec6afc38e898 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -7,6 +7,7 @@
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/msi.h>
@@ -17,6 +18,8 @@
#include <linux/rculist.h>
#include <linux/rcupdate.h>
+#include <xen/xen.h>
+
#include <asm/irqdomain.h>
#define VMD_CFGBAR 0
@@ -66,8 +69,23 @@ enum vmd_features {
* interrupt handling.
*/
VMD_FEAT_CAN_BYPASS_MSI_REMAP = (1 << 4),
+
+ /*
+ * Enable ASPM on the PCIe root ports and set the default LTR of the
+ * storage devices on platforms where these values are not configured by
+ * BIOS. This is needed for laptops, which require these settings for
+ * proper power management of the SoC.
+ */
+ VMD_FEAT_BIOS_PM_QUIRK = (1 << 5),
};
+#define VMD_BIOS_PM_QUIRK_LTR 0x1003 /* 3145728 ns */
+
+#define VMD_FEATS_CLIENT (VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP | \
+ VMD_FEAT_HAS_BUS_RESTRICTIONS | \
+ VMD_FEAT_OFFSET_FIRST_VECTOR | \
+ VMD_FEAT_BIOS_PM_QUIRK)
+
static DEFINE_IDA(vmd_instance_ida);
/*
@@ -110,7 +128,7 @@ struct vmd_irq_list {
struct vmd_dev {
struct pci_dev *dev;
- spinlock_t cfg_lock;
+ raw_spinlock_t cfg_lock;
void __iomem *cfgbar;
int msix_count;
@@ -157,69 +175,63 @@ static void vmd_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
msg->arch_addr_lo.destid_0_7 = index_from_irqs(vmd, irq);
}
-/*
- * We rely on MSI_FLAG_USE_DEF_CHIP_OPS to set the IRQ mask/unmask ops.
- */
static void vmd_irq_enable(struct irq_data *data)
{
struct vmd_irq *vmdirq = data->chip_data;
- unsigned long flags;
- raw_spin_lock_irqsave(&list_lock, flags);
- WARN_ON(vmdirq->enabled);
- list_add_tail_rcu(&vmdirq->node, &vmdirq->irq->irq_list);
- vmdirq->enabled = true;
- raw_spin_unlock_irqrestore(&list_lock, flags);
+ scoped_guard(raw_spinlock_irqsave, &list_lock) {
+ WARN_ON(vmdirq->enabled);
+ list_add_tail_rcu(&vmdirq->node, &vmdirq->irq->irq_list);
+ vmdirq->enabled = true;
+ }
+}
+static void vmd_pci_msi_enable(struct irq_data *data)
+{
+ vmd_irq_enable(data->parent_data);
data->chip->irq_unmask(data);
}
+static unsigned int vmd_pci_msi_startup(struct irq_data *data)
+{
+ vmd_pci_msi_enable(data);
+ return 0;
+}
+
static void vmd_irq_disable(struct irq_data *data)
{
struct vmd_irq *vmdirq = data->chip_data;
- unsigned long flags;
-
- data->chip->irq_mask(data);
- raw_spin_lock_irqsave(&list_lock, flags);
- if (vmdirq->enabled) {
- list_del_rcu(&vmdirq->node);
- vmdirq->enabled = false;
+ scoped_guard(raw_spinlock_irqsave, &list_lock) {
+ if (vmdirq->enabled) {
+ list_del_rcu(&vmdirq->node);
+ vmdirq->enabled = false;
+ }
}
- raw_spin_unlock_irqrestore(&list_lock, flags);
}
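The scoped_guard() conversions in this hunk rely on the lock guard machinery from <linux/cleanup.h>/<linux/spinlock.h>; the guarded body above is equivalent to the open-coded form it replaces, with the unlock additionally guaranteed on any early exit from the scope (sketch, not part of the patch):

	unsigned long flags;

	raw_spin_lock_irqsave(&list_lock, flags);
	if (vmdirq->enabled) {
		list_del_rcu(&vmdirq->node);
		vmdirq->enabled = false;
	}
	raw_spin_unlock_irqrestore(&list_lock, flags);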
-/*
- * XXX: Stubbed until we develop acceptable way to not create conflicts with
- * other devices sharing the same vector.
- */
-static int vmd_irq_set_affinity(struct irq_data *data,
- const struct cpumask *dest, bool force)
+static void vmd_pci_msi_disable(struct irq_data *data)
+{
+ data->chip->irq_mask(data);
+ vmd_irq_disable(data->parent_data);
+}
+
+static void vmd_pci_msi_shutdown(struct irq_data *data)
{
- return -EINVAL;
+ vmd_pci_msi_disable(data);
}
static struct irq_chip vmd_msi_controller = {
.name = "VMD-MSI",
- .irq_enable = vmd_irq_enable,
- .irq_disable = vmd_irq_disable,
.irq_compose_msi_msg = vmd_compose_msi_msg,
- .irq_set_affinity = vmd_irq_set_affinity,
};
-static irq_hw_number_t vmd_get_hwirq(struct msi_domain_info *info,
- msi_alloc_info_t *arg)
-{
- return 0;
-}
-
/*
* XXX: We can be even smarter selecting the best IRQ once we solve the
* affinity problem.
*/
static struct vmd_irq_list *vmd_next_irq(struct vmd_dev *vmd, struct msi_desc *desc)
{
- unsigned long flags;
int i, best;
if (vmd->msix_count == 1 + vmd->first_vec)
@@ -236,113 +248,130 @@ static struct vmd_irq_list *vmd_next_irq(struct vmd_dev *vmd, struct msi_desc *d
return &vmd->irqs[vmd->first_vec];
}
- raw_spin_lock_irqsave(&list_lock, flags);
- best = vmd->first_vec + 1;
- for (i = best; i < vmd->msix_count; i++)
- if (vmd->irqs[i].count < vmd->irqs[best].count)
- best = i;
- vmd->irqs[best].count++;
- raw_spin_unlock_irqrestore(&list_lock, flags);
+ scoped_guard(raw_spinlock_irq, &list_lock) {
+ best = vmd->first_vec + 1;
+ for (i = best; i < vmd->msix_count; i++)
+ if (vmd->irqs[i].count < vmd->irqs[best].count)
+ best = i;
+ vmd->irqs[best].count++;
+ }
return &vmd->irqs[best];
}
-static int vmd_msi_init(struct irq_domain *domain, struct msi_domain_info *info,
- unsigned int virq, irq_hw_number_t hwirq,
- msi_alloc_info_t *arg)
+static void vmd_msi_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs);
+
+static int vmd_msi_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
{
- struct msi_desc *desc = arg->desc;
- struct vmd_dev *vmd = vmd_from_bus(msi_desc_to_pci_dev(desc)->bus);
- struct vmd_irq *vmdirq = kzalloc(sizeof(*vmdirq), GFP_KERNEL);
+ struct msi_desc *desc = ((msi_alloc_info_t *)arg)->desc;
+ struct vmd_dev *vmd = domain->host_data;
+ struct vmd_irq *vmdirq;
- if (!vmdirq)
- return -ENOMEM;
+ for (int i = 0; i < nr_irqs; ++i) {
+ vmdirq = kzalloc(sizeof(*vmdirq), GFP_KERNEL);
+ if (!vmdirq) {
+ vmd_msi_free(domain, virq, i);
+ return -ENOMEM;
+ }
- INIT_LIST_HEAD(&vmdirq->node);
- vmdirq->irq = vmd_next_irq(vmd, desc);
- vmdirq->virq = virq;
+ INIT_LIST_HEAD(&vmdirq->node);
+ vmdirq->irq = vmd_next_irq(vmd, desc);
+ vmdirq->virq = virq + i;
+
+ irq_domain_set_info(domain, virq + i, vmdirq->irq->virq,
+ &vmd_msi_controller, vmdirq,
+ handle_untracked_irq, vmd, NULL);
+ }
- irq_domain_set_info(domain, virq, vmdirq->irq->virq, info->chip, vmdirq,
- handle_untracked_irq, vmd, NULL);
return 0;
}
-static void vmd_msi_free(struct irq_domain *domain,
- struct msi_domain_info *info, unsigned int virq)
+static void vmd_msi_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
{
- struct vmd_irq *vmdirq = irq_get_chip_data(virq);
- unsigned long flags;
-
- synchronize_srcu(&vmdirq->irq->srcu);
-
- /* XXX: Potential optimization to rebalance */
- raw_spin_lock_irqsave(&list_lock, flags);
- vmdirq->irq->count--;
- raw_spin_unlock_irqrestore(&list_lock, flags);
+ struct irq_data *irq_data;
+ struct vmd_irq *vmdirq;
- kfree(vmdirq);
-}
+ for (int i = 0; i < nr_irqs; ++i) {
+ irq_data = irq_domain_get_irq_data(domain, virq + i);
+ vmdirq = irq_data->chip_data;
-static int vmd_msi_prepare(struct irq_domain *domain, struct device *dev,
- int nvec, msi_alloc_info_t *arg)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct vmd_dev *vmd = vmd_from_bus(pdev->bus);
+ synchronize_srcu(&vmdirq->irq->srcu);
- if (nvec > vmd->msix_count)
- return vmd->msix_count;
+ /* XXX: Potential optimization to rebalance */
+ scoped_guard(raw_spinlock_irq, &list_lock)
+ vmdirq->irq->count--;
- memset(arg, 0, sizeof(*arg));
- return 0;
+ kfree(vmdirq);
+ }
}
-static void vmd_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
+static const struct irq_domain_ops vmd_msi_domain_ops = {
+ .alloc = vmd_msi_alloc,
+ .free = vmd_msi_free,
+};
+
+static bool vmd_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
+ struct irq_domain *real_parent,
+ struct msi_domain_info *info)
{
- arg->desc = desc;
+ if (!msi_lib_init_dev_msi_info(dev, domain, real_parent, info))
+ return false;
+
+ info->chip->irq_startup = vmd_pci_msi_startup;
+ info->chip->irq_shutdown = vmd_pci_msi_shutdown;
+ info->chip->irq_enable = vmd_pci_msi_enable;
+ info->chip->irq_disable = vmd_pci_msi_disable;
+ return true;
}
-static struct msi_domain_ops vmd_msi_domain_ops = {
- .get_hwirq = vmd_get_hwirq,
- .msi_init = vmd_msi_init,
- .msi_free = vmd_msi_free,
- .msi_prepare = vmd_msi_prepare,
- .set_desc = vmd_set_desc,
-};
+#define VMD_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | MSI_FLAG_PCI_MSIX)
+#define VMD_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_NO_AFFINITY)
-static struct msi_domain_info vmd_msi_domain_info = {
- .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_PCI_MSIX,
- .ops = &vmd_msi_domain_ops,
- .chip = &vmd_msi_controller,
+static const struct msi_parent_ops vmd_msi_parent_ops = {
+ .supported_flags = VMD_MSI_FLAGS_SUPPORTED,
+ .required_flags = VMD_MSI_FLAGS_REQUIRED,
+ .bus_select_token = DOMAIN_BUS_VMD_MSI,
+ .bus_select_mask = MATCH_PCI_MSI,
+ .prefix = "VMD-",
+ .init_dev_msi_info = vmd_init_dev_msi_info,
};
-static void vmd_set_msi_remapping(struct vmd_dev *vmd, bool enable)
-{
- u16 reg;
-
- pci_read_config_word(vmd->dev, PCI_REG_VMCONFIG, &reg);
- reg = enable ? (reg & ~VMCONFIG_MSI_REMAP) :
- (reg | VMCONFIG_MSI_REMAP);
- pci_write_config_word(vmd->dev, PCI_REG_VMCONFIG, reg);
-}
-
static int vmd_create_irq_domain(struct vmd_dev *vmd)
{
- struct fwnode_handle *fn;
+ struct irq_domain_info info = {
+ .size = vmd->msix_count,
+ .ops = &vmd_msi_domain_ops,
+ .host_data = vmd,
+ };
- fn = irq_domain_alloc_named_id_fwnode("VMD-MSI", vmd->sysdata.domain);
- if (!fn)
+ info.fwnode = irq_domain_alloc_named_id_fwnode("VMD-MSI",
+ vmd->sysdata.domain);
+ if (!info.fwnode)
return -ENODEV;
- vmd->irq_domain = pci_msi_create_irq_domain(fn, &vmd_msi_domain_info, NULL);
+ vmd->irq_domain = msi_create_parent_irq_domain(&info,
+ &vmd_msi_parent_ops);
if (!vmd->irq_domain) {
- irq_domain_free_fwnode(fn);
+ irq_domain_free_fwnode(info.fwnode);
return -ENODEV;
}
return 0;
}
+static void vmd_set_msi_remapping(struct vmd_dev *vmd, bool enable)
+{
+ u16 reg;
+
+ pci_read_config_word(vmd->dev, PCI_REG_VMCONFIG, &reg);
+ reg = enable ? (reg & ~VMCONFIG_MSI_REMAP) :
+ (reg | VMCONFIG_MSI_REMAP);
+ pci_write_config_word(vmd->dev, PCI_REG_VMCONFIG, reg);
+}
+
static void vmd_remove_irq_domain(struct vmd_dev *vmd)
{
/*
@@ -381,29 +410,24 @@ static int vmd_pci_read(struct pci_bus *bus, unsigned int devfn, int reg,
{
struct vmd_dev *vmd = vmd_from_bus(bus);
void __iomem *addr = vmd_cfg_addr(vmd, bus, devfn, reg, len);
- unsigned long flags;
- int ret = 0;
if (!addr)
return -EFAULT;
- spin_lock_irqsave(&vmd->cfg_lock, flags);
+ guard(raw_spinlock_irqsave)(&vmd->cfg_lock);
switch (len) {
case 1:
*value = readb(addr);
- break;
+ return 0;
case 2:
*value = readw(addr);
- break;
+ return 0;
case 4:
*value = readl(addr);
- break;
+ return 0;
default:
- ret = -EINVAL;
- break;
+ return -EINVAL;
}
- spin_unlock_irqrestore(&vmd->cfg_lock, flags);
- return ret;
}
/*
@@ -416,32 +440,27 @@ static int vmd_pci_write(struct pci_bus *bus, unsigned int devfn, int reg,
{
struct vmd_dev *vmd = vmd_from_bus(bus);
void __iomem *addr = vmd_cfg_addr(vmd, bus, devfn, reg, len);
- unsigned long flags;
- int ret = 0;
if (!addr)
return -EFAULT;
- spin_lock_irqsave(&vmd->cfg_lock, flags);
+ guard(raw_spinlock_irqsave)(&vmd->cfg_lock);
switch (len) {
case 1:
writeb(value, addr);
readb(addr);
- break;
+ return 0;
case 2:
writew(value, addr);
readw(addr);
- break;
+ return 0;
case 4:
writel(value, addr);
readl(addr);
- break;
+ return 0;
default:
- ret = -EINVAL;
- break;
+ return -EINVAL;
}
- spin_unlock_irqrestore(&vmd->cfg_lock, flags);
- return ret;
}
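Unlike the scoped_guard() blocks in the IRQ paths, the config accessors use the function-scope guard() form: the lock is taken at the guard statement and dropped automatically on every return path, which is what lets each switch case return directly instead of branching to a common unlock. Roughly (behavioral sketch, not literal kernel source):

	guard(raw_spinlock_irqsave)(&vmd->cfg_lock);
	/* cfg_lock is held from here until any return from the function */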
static struct pci_ops vmd_ops = {
@@ -510,10 +529,9 @@ static void vmd_domain_reset(struct vmd_dev *vmd)
base = vmd->cfgbar + PCIE_ECAM_OFFSET(bus,
PCI_DEVFN(dev, 0), 0);
- hdr_type = readb(base + PCI_HEADER_TYPE) &
- PCI_HEADER_TYPE_MASK;
+ hdr_type = readb(base + PCI_HEADER_TYPE);
- functions = (hdr_type & 0x80) ? 8 : 1;
+ functions = (hdr_type & PCI_HEADER_TYPE_MFD) ? 8 : 1;
for (fn = 0; fn < functions; fn++) {
base = vmd->cfgbar + PCIE_ECAM_OFFSET(bus,
PCI_DEVFN(dev, fn), 0);
@@ -526,8 +544,23 @@ static void vmd_domain_reset(struct vmd_dev *vmd)
PCI_CLASS_BRIDGE_PCI))
continue;
- memset_io(base + PCI_IO_BASE, 0,
- PCI_ROM_ADDRESS1 - PCI_IO_BASE);
+ /*
+ * Temporarily disable the I/O range before updating
+ * PCI_IO_BASE.
+ */
+ writel(0x0000ffff, base + PCI_IO_BASE_UPPER16);
+ /* Update lower 16 bits of I/O base/limit */
+ writew(0x00f0, base + PCI_IO_BASE);
+ /* Update upper 16 bits of I/O base/limit */
+ writel(0, base + PCI_IO_BASE_UPPER16);
+
+ /* MMIO Base/Limit */
+ writel(0x0000fff0, base + PCI_MEMORY_BASE);
+
+ /* Prefetchable MMIO Base/Limit */
+ writel(0, base + PCI_PREF_LIMIT_UPPER32);
+ writel(0x0000fff0, base + PCI_PREF_MEMORY_BASE);
+ writel(0xffffffff, base + PCI_PREF_BASE_UPPER32);
}
}
}
@@ -545,22 +578,6 @@ static void vmd_detach_resources(struct vmd_dev *vmd)
vmd->dev->resource[VMD_MEMBAR2].child = NULL;
}
-/*
- * VMD domains start at 0x10000 to not clash with ACPI _SEG domains.
- * Per ACPI r6.0, sec 6.5.6, _SEG returns an integer, of which the lower
- * 16 bits are the PCI Segment Group (domain) number. Other bits are
- * currently reserved.
- */
-static int vmd_find_free_domain(void)
-{
- int domain = 0xffff;
- struct pci_bus *bus = NULL;
-
- while ((bus = pci_find_next_bus(bus)) != NULL)
- domain = max_t(int, domain, pci_domain_nr(bus));
- return domain + 1;
-}
-
static int vmd_get_phys_offsets(struct vmd_dev *vmd, bool native_hint,
resource_size_t *offset1,
resource_size_t *offset2)
@@ -709,6 +726,51 @@ static void vmd_copy_host_bridge_flags(struct pci_host_bridge *root_bridge,
vmd_bridge->native_dpc = root_bridge->native_dpc;
}
+/*
+ * Enable ASPM and LTR settings on devices that aren't configured by BIOS.
+ */
+static int vmd_pm_enable_quirk(struct pci_dev *pdev, void *userdata)
+{
+ unsigned long features = *(unsigned long *)userdata;
+ u16 ltr = VMD_BIOS_PM_QUIRK_LTR;
+ u32 ltr_reg;
+ int pos;
+
+ if (!(features & VMD_FEAT_BIOS_PM_QUIRK))
+ return 0;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_LTR);
+ if (!pos)
+ goto out_state_change;
+
+ /*
+ * Skip if the max snoop LTR is non-zero, indicating BIOS has set it
+ * so the LTR quirk is not needed.
+ */
+ pci_read_config_dword(pdev, pos + PCI_LTR_MAX_SNOOP_LAT, &ltr_reg);
+ if (ltr_reg & (PCI_LTR_VALUE_MASK | PCI_LTR_SCALE_MASK))
+ goto out_state_change;
+
+ /*
+ * Set the default values to the maximum required by the platform to
+ * allow the deepest power management savings. Write as a DWORD where
+ * the lower word is the max snoop latency and the upper word is the
+ * max non-snoop latency.
+ */
+ ltr_reg = (ltr << 16) | ltr;
+ pci_write_config_dword(pdev, pos + PCI_LTR_MAX_SNOOP_LAT, ltr_reg);
+ pci_info(pdev, "VMD: Default LTR value set by driver\n");
+
+out_state_change:
+ /*
+ * Ensure devices are in D0 before enabling PCI-PM L1 PM Substates, per
+ * PCIe r6.0, sec 5.5.4.
+ */
+ pci_set_power_state_locked(pdev, PCI_D0);
+ pci_enable_link_state_locked(pdev, PCIE_LINK_STATE_ALL);
+ return 0;
+}
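The quirk's LTR constant decodes as follows under the LTR Extended Capability register layout (PCI_LTR_VALUE_MASK covers bits 9:0, PCI_LTR_SCALE_MASK bits 12:10); a worked example, not new code:

/*
 * VMD_BIOS_PM_QUIRK_LTR = 0x1003:
 *   LatencyValue (bits 9:0)   = 0x003 (3)
 *   LatencyScale (bits 12:10) = 0b100 (4) -> units of 2^(5 * 4) = 1048576 ns
 *   => 3 * 1048576 ns = 3145728 ns, matching the comment on the define.
 * ltr_reg = (ltr << 16) | ltr then sets both the max snoop (low word)
 * and max no-snoop (high word) latencies to this value.
 */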
+
static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
{
struct pci_sysdata *sd = &vmd->sysdata;
@@ -800,13 +862,6 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
.parent = res,
};
- sd->vmd_dev = vmd->dev;
- sd->domain = vmd_find_free_domain();
- if (sd->domain < 0)
- return sd->domain;
-
- sd->node = pcibus_to_node(vmd->dev->bus);
-
/*
* Currently MSI remapping must be enabled in guest passthrough mode
* due to some missing interrupt remapping plumbing. This is probably
@@ -824,12 +879,6 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
ret = vmd_create_irq_domain(vmd);
if (ret)
return ret;
-
- /*
- * Override the IRQ domain bus token so the domain can be
- * distinguished from a regular PCI/MSI domain.
- */
- irq_domain_update_bus_token(vmd->irq_domain, DOMAIN_BUS_VMD_MSI);
} else {
vmd_set_msi_remapping(vmd, false);
}
@@ -838,9 +887,24 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
pci_add_resource_offset(&resources, &vmd->resources[1], offset[0]);
pci_add_resource_offset(&resources, &vmd->resources[2], offset[1]);
+ sd->vmd_dev = vmd->dev;
+
+ /*
+ * Emulated domains start at 0x10000 to not clash with ACPI _SEG
+ * domains. Per ACPI r6.0, sec 6.5.6, _SEG returns an integer, of
+ * which the lower 16 bits are the PCI Segment Group (domain) number.
+ * Other bits are currently reserved.
+ */
+ sd->domain = pci_bus_find_emul_domain_nr(0, 0x10000, INT_MAX);
+ if (sd->domain < 0)
+ return sd->domain;
+
+ sd->node = pcibus_to_node(vmd->dev->bus);
+
vmd->bus = pci_create_root_bus(&vmd->dev->dev, vmd->busn_start,
&vmd_ops, sd, &resources);
if (!vmd->bus) {
+ pci_bus_release_emul_domain_nr(sd->domain);
pci_free_resource_list(&resources);
vmd_remove_irq_domain(vmd);
return -ENODEV;
@@ -856,6 +920,9 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
dev_set_msi_domain(&vmd->bus->dev,
dev_get_msi_domain(&vmd->dev->dev));
+ WARN(sysfs_create_link(&vmd->dev->dev.kobj, &vmd->bus->dev.kobj,
+ "domain"), "Can't create symlink to domain\n");
+
vmd_acpi_begin();
pci_scan_child_bus(vmd->bus);
@@ -872,7 +939,8 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
if (!list_empty(&child->devices)) {
dev = list_first_entry(&child->devices,
struct pci_dev, bus_list);
- if (pci_reset_bus(dev))
+ ret = pci_reset_bus(dev);
+ if (ret)
pci_warn(dev, "can't reset device: %d\n", ret);
break;
@@ -881,6 +949,8 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
pci_assign_unassigned_bus_resources(vmd->bus);
+ pci_walk_bus(vmd->bus, vmd_pm_enable_quirk, &features);
+
/*
* VMD root buses are virtual and don't return true on pci_is_pcie()
* and will fail pcie_bus_configure_settings() early. It can instead be
@@ -892,9 +962,6 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
pci_bus_add_devices(vmd->bus);
vmd_acpi_end();
-
- WARN(sysfs_create_link(&vmd->dev->dev.kobj, &vmd->bus->dev.kobj,
- "domain"), "Can't create symlink to domain\n");
return 0;
}
@@ -904,6 +971,24 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
struct vmd_dev *vmd;
int err;
+ if (xen_domain()) {
+ /*
+ * Xen doesn't have knowledge about devices in the VMD bus
+ * because the config space of devices behind the VMD bridge is
+ * not known to Xen, and hence Xen cannot discover or configure
+ * them in any way.
+ *
+ * Bypass of MSI remapping won't work in that case as direct
+ * write by Linux to the MSI entries won't result in functional
+ * interrupts, as Xen is the entity that manages the host
+ * interrupt controller and must configure interrupts. However
+ * multiplexing of interrupts by the VMD bridge will work under
+ * Xen, so force the usage of that mode which must always be
+ * supported by VMD bridges.
+ */
+ features &= ~VMD_FEAT_CAN_BYPASS_MSI_REMAP;
+ }
+
if (resource_size(&dev->resource[VMD_CFGBAR]) < (1 << 20))
return -ENOMEM;
@@ -912,7 +997,8 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
return -ENOMEM;
vmd->dev = dev;
- vmd->instance = ida_simple_get(&vmd_instance_ida, 0, 0, GFP_KERNEL);
+ vmd->sysdata.domain = PCI_DOMAIN_NR_NOT_SET;
+ vmd->instance = ida_alloc(&vmd_instance_ida, GFP_KERNEL);
if (vmd->instance < 0)
return vmd->instance;
@@ -943,7 +1029,7 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
if (features & VMD_FEAT_OFFSET_FIRST_VECTOR)
vmd->first_vec = 1;
- spin_lock_init(&vmd->cfg_lock);
+ raw_spin_lock_init(&vmd->cfg_lock);
pci_set_drvdata(dev, vmd);
err = vmd_enable_domain(vmd, features);
if (err)
@@ -954,7 +1040,7 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
return 0;
out_release_instance:
- ida_simple_remove(&vmd_instance_ida, vmd->instance);
+ ida_free(&vmd_instance_ida, vmd->instance);
return err;
}
@@ -970,13 +1056,21 @@ static void vmd_remove(struct pci_dev *dev)
{
struct vmd_dev *vmd = pci_get_drvdata(dev);
- sysfs_remove_link(&vmd->dev->dev.kobj, "domain");
pci_stop_root_bus(vmd->bus);
+ sysfs_remove_link(&vmd->dev->dev.kobj, "domain");
pci_remove_root_bus(vmd->bus);
vmd_cleanup_srcu(vmd);
vmd_detach_resources(vmd);
vmd_remove_irq_domain(vmd);
- ida_simple_remove(&vmd_instance_ida, vmd->instance);
+ ida_free(&vmd_instance_ida, vmd->instance);
+ pci_bus_release_emul_domain_nr(vmd->sysdata.domain);
+}
+
+static void vmd_shutdown(struct pci_dev *dev)
+{
+ struct vmd_dev *vmd = pci_get_drvdata(dev);
+
+ vmd_remove_irq_domain(vmd);
}
#ifdef CONFIG_PM_SLEEP
@@ -998,10 +1092,7 @@ static int vmd_resume(struct device *dev)
struct vmd_dev *vmd = pci_get_drvdata(pdev);
int err, i;
- if (vmd->irq_domain)
- vmd_set_msi_remapping(vmd, true);
- else
- vmd_set_msi_remapping(vmd, false);
+ vmd_set_msi_remapping(vmd, !!vmd->irq_domain);
for (i = 0; i < vmd->msix_count; i++) {
err = devm_request_irq(dev, vmd->irqs[i].virq,
@@ -1017,36 +1108,30 @@ static int vmd_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(vmd_dev_pm_ops, vmd_suspend, vmd_resume);
static const struct pci_device_id vmd_ids[] = {
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_201D),
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_VMD_201D),
.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP,},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_28C0),
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_VMD_28C0),
.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW |
VMD_FEAT_HAS_BUS_RESTRICTIONS |
VMD_FEAT_CAN_BYPASS_MSI_REMAP,},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x467f),
- .driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP |
- VMD_FEAT_HAS_BUS_RESTRICTIONS |
- VMD_FEAT_OFFSET_FIRST_VECTOR,},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4c3d),
- .driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP |
- VMD_FEAT_HAS_BUS_RESTRICTIONS |
- VMD_FEAT_OFFSET_FIRST_VECTOR,},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa77f),
- .driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP |
- VMD_FEAT_HAS_BUS_RESTRICTIONS |
- VMD_FEAT_OFFSET_FIRST_VECTOR,},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7d0b),
- .driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP |
- VMD_FEAT_HAS_BUS_RESTRICTIONS |
- VMD_FEAT_OFFSET_FIRST_VECTOR,},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xad0b),
- .driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP |
- VMD_FEAT_HAS_BUS_RESTRICTIONS |
- VMD_FEAT_OFFSET_FIRST_VECTOR,},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_9A0B),
- .driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP |
- VMD_FEAT_HAS_BUS_RESTRICTIONS |
- VMD_FEAT_OFFSET_FIRST_VECTOR,},
+ {PCI_VDEVICE(INTEL, 0x467f),
+ .driver_data = VMD_FEATS_CLIENT,},
+ {PCI_VDEVICE(INTEL, 0x4c3d),
+ .driver_data = VMD_FEATS_CLIENT,},
+ {PCI_VDEVICE(INTEL, 0xa77f),
+ .driver_data = VMD_FEATS_CLIENT,},
+ {PCI_VDEVICE(INTEL, 0x7d0b),
+ .driver_data = VMD_FEATS_CLIENT,},
+ {PCI_VDEVICE(INTEL, 0xad0b),
+ .driver_data = VMD_FEATS_CLIENT,},
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_VMD_9A0B),
+ .driver_data = VMD_FEATS_CLIENT,},
+ {PCI_VDEVICE(INTEL, 0xb60b),
+ .driver_data = VMD_FEATS_CLIENT,},
+ {PCI_VDEVICE(INTEL, 0xb06f),
+ .driver_data = VMD_FEATS_CLIENT,},
+ {PCI_VDEVICE(INTEL, 0xb07f),
+ .driver_data = VMD_FEATS_CLIENT,},
{0,}
};
MODULE_DEVICE_TABLE(pci, vmd_ids);
@@ -1056,6 +1141,7 @@ static struct pci_driver vmd_drv = {
.id_table = vmd_ids,
.probe = vmd_probe,
.remove = vmd_remove,
+ .shutdown = vmd_shutdown,
.driver = {
.pm = &vmd_dev_pm_ops,
},
@@ -1063,5 +1149,6 @@ static struct pci_driver vmd_drv = {
module_pci_driver(vmd_drv);
MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION("Volume Management Device driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION("0.6");